# mavonic_private_repos/transformers/src/transformers/modeling_outputs.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch

from .utils import ModelOutput


@dataclass
class BaseModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class BaseModelOutputWithNoAttention(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None

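
# --- Editorial usage sketch (not part of the original module) -----------------
# A minimal, self-contained illustration of how `ModelOutput` subclasses such as
# `BaseModelOutput` behave when consumed: attribute, string-key and integer-index
# access all resolve to the same object, and fields left as `None` are skipped
# when the output is converted to a plain tuple. The helper name below is
# illustrative only.
def _example_base_model_output_access():
    hidden = torch.randn(2, 7, 16)
    output = BaseModelOutput(last_hidden_state=hidden)

    # Attribute, key and index access all point at the same tensor.
    assert output.last_hidden_state is hidden
    assert output["last_hidden_state"] is hidden
    assert output[0] is hidden

    # `hidden_states` and `attentions` were left as None, so only one entry survives.
    assert len(output.to_tuple()) == 1
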
@dataclass
class BaseModelOutputWithPooling(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The
            linear layer weights are trained from the next sentence prediction (classification) objective during
            pretraining.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state after a pooling operation on the spatial dimensions.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None

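
# --- Editorial usage sketch (not part of the original module) -----------------
# A minimal sketch of the common pattern for producing one of these outputs from a
# custom model: compute the tensors, honour a `return_dict`-style flag, and
# otherwise wrap them in the appropriate output class. `TinyEncoder` and its sizes
# are illustrative assumptions, not a real library component.
def _example_return_pooling_output():
    class TinyEncoder(torch.nn.Module):
        def __init__(self, hidden_size=16):
            super().__init__()
            self.layer = torch.nn.Linear(hidden_size, hidden_size)
            self.pooler = torch.nn.Linear(hidden_size, hidden_size)

        def forward(self, embeddings, return_dict=True):
            sequence_output = self.layer(embeddings)
            # Pool the first position, BERT-style, with a tanh activation.
            pooled_output = torch.tanh(self.pooler(sequence_output[:, 0]))
            if not return_dict:
                return (sequence_output, pooled_output)
            return BaseModelOutputWithPooling(
                last_hidden_state=sequence_output,
                pooler_output=pooled_output,
            )

    out = TinyEncoder()(torch.randn(2, 5, 16))
    assert out.pooler_output.shape == (2, 16)
    assert out["pooler_output"] is out.pooler_output
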
@dataclass
class BaseModelOutputWithPast(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
            If `past_key_values` is used only the last hidden-state of the sequences of shape
            `(batch_size, 1, hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class BaseModelOutputWithCrossAttentions(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

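
# --- Editorial usage sketch (not part of the original module) -----------------
# A hedged sketch of the `past_key_values` round trip described above, using a tiny
# randomly initialized GPT-2 as a stand-in decoder (GPT2Model actually returns the
# cross-attention variant of this class, but the caching pattern is the same). The
# config sizes and the helper name are illustrative assumptions.
def _example_past_key_values_reuse():
    from transformers import GPT2Config, GPT2Model  # assumed importable alongside this module

    model = GPT2Model(GPT2Config(vocab_size=100, n_positions=32, n_embd=16, n_layer=2, n_head=2))
    model.eval()
    prompt = torch.randint(0, 100, (1, 4))
    with torch.no_grad():
        first = model(prompt, use_cache=True)
        # Feed only the newest token together with the cached key/value states;
        # `last_hidden_state` then covers just that token: (batch_size, 1, hidden_size).
        next_token = torch.randint(0, 100, (1, 1))
        second = model(next_token, past_key_values=first.past_key_values, use_cache=True)
    assert second.last_hidden_state.shape == (1, 1, 16)
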
@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The
            linear layer weights are trained from the next sentence prediction (classification) objective during
            pretraining.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

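
# --- Editorial usage sketch (not part of the original module) -----------------
# A hedged sketch of when `cross_attentions` actually show up in one of these
# outputs: the model has to be configured as a decoder with cross-attention and be
# given `encoder_hidden_states`. The tiny `BertConfig` sizes and the helper name
# are illustrative assumptions.
def _example_cross_attentions():
    from transformers import BertConfig, BertModel  # assumed importable alongside this module

    config = BertConfig(
        vocab_size=100, hidden_size=16, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=32, is_decoder=True, add_cross_attention=True,
    )
    model = BertModel(config)
    model.eval()
    with torch.no_grad():
        out = model(
            input_ids=torch.randint(0, 100, (1, 6)),
            encoder_hidden_states=torch.randn(1, 9, 16),
            output_attentions=True,
        )
    # One tensor per layer of shape (batch_size, num_heads, query_length, key_length).
    assert out.cross_attentions[0].shape == (1, 2, 6, 9)
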
@dataclass
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
            If `past_key_values` is used only the last hidden-state of the sequences of shape
            `(batch_size, 1, hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

@dataclass
class MoECausalLMOutputWithPast(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden
    states terms, to train a MoE model.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
            z_loss for the sparse modules.
        aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
            aux_loss for the sparse modules.
        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
            Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse
            modules.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    z_loss: torch.FloatTensor = None
    aux_loss: torch.FloatTensor = None
    router_logits: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class MoEModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
            Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary
            loss and the z_loss for Mixture of Experts models.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    router_probs: Optional[Tuple[torch.FloatTensor]] = None

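
# --- Editorial usage sketch (not part of the original module) -----------------
# A hedged sketch of what the per-layer router outputs described above are
# typically used for: turning a tuple of `(batch_size, sequence_length, num_experts)`
# router logits into a Switch-Transformers-style load-balancing auxiliary loss.
# The exact formulation varies per model; this is one common variant, and the
# helper name is illustrative.
def _example_router_aux_loss():
    num_experts, num_layers = 4, 3
    router_logits = tuple(torch.randn(2, 5, num_experts) for _ in range(num_layers))

    layer_losses = []
    for layer_logits in router_logits:
        probs = torch.softmax(layer_logits, dim=-1)  # routing probabilities per token
        top1 = probs.argmax(dim=-1)                  # top-1 expert chosen per token
        # Fraction of tokens routed to each expert, and mean routing probability per expert.
        tokens_per_expert = torch.nn.functional.one_hot(top1, num_experts).float().mean(dim=(0, 1))
        prob_per_expert = probs.mean(dim=(0, 1))
        layer_losses.append(num_experts * torch.sum(tokens_per_expert * prob_per_expert))
    aux_loss = torch.stack(layer_losses).mean()
    assert aux_loss.ndim == 0  # a scalar that can be added to the language modeling loss
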
@dataclass
class MoeModelOutputWithPast(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer,
            + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
            Raw router logits that are computed by MoE routers; these terms are used to compute the auxiliary loss for
            Mixture of Experts models.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    router_logits: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class MoeCausalLMOutputWithPast(ModelOutput):
    """
    Base class for causal language model (or autoregressive) with mixture of experts outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
            aux_loss for the sparse modules.
        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
            Raw router logits that are computed by MoE routers; these terms are used to compute the auxiliary loss for
            Mixture of Experts models.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding) as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Raw router probabilities that are computed by MoE routers, these terms are used to compute the auxiliary loss and the z_loss for Mixture of Experts models. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_probs: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqMoEModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. 
""" last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `torch.FloatTensor` tuples of length `config.n_layers`, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if `config.is_decoder = True`. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. 
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class MaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. 
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqMoEOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and z_loss for Mixture of Experts models. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None encoder_z_loss: torch.FloatTensor = None decoder_z_loss: torch.FloatTensor = None encoder_aux_loss: torch.FloatTensor = None decoder_aux_loss: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided): Next sequence prediction (classification) loss. logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class MultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class QuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. 
start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SemanticSegmenterOutput(ModelOutput): """ Base class for outputs of semantic segmentation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): Classification scores for each pixel. <Tip warning={true}> The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the original image size as post-processing. You should always check your logits shape and resize as needed. </Tip> hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageClassifierOutput(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageClassifierOutputWithNoAttention(ModelOutput): """ Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DepthEstimatorOutput(ModelOutput): """ Base class for outputs of depth estimation models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. predicted_depth (`torch.FloatTensor` of shape `(batch_size, height, width)`): Predicted depth for each pixel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None predicted_depth: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageSuperResolutionOutput(ModelOutput): """ Base class for outputs of image super resolution models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed images, possibly upscaled. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Wav2Vec2BaseModelOutput(ModelOutput): """ Base class for models that have been trained with the Wav2Vec2 loss objective. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`): Sequence of extracted feature vectors of the last convolutional layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None extract_features: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XVectorOutput(ModelOutput): """ Output type of [`Wav2Vec2ForXVector`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Classification hidden states before AMSoftmax. embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`): Utterance embeddings used for vector similarity-based retrieval. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None embeddings: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BackboneOutput(ModelOutput): """ Base class for outputs of backbones. Args: feature_maps (`tuple(torch.FloatTensor)` of shape `(batch_size, num_channels, height, width)`): Feature maps of the stages. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, num_channels, height, width)`, depending on the backbone. Hidden-states of the model at the output of each stage plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Only applicable if the backbone uses attention. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ feature_maps: Tuple[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndProjection(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. projection_state (`tuple(torch.FloatTensor)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` of shape `(batch_size,config.project_dim)`. Text embeddings before the projection layer, used to mimic the last hidden state of the teacher encoder. 
""" last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None projection_state: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqSpectrogramOutput(ModelOutput): """ Base class for sequence-to-sequence spectrogram outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Spectrogram generation loss. spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`): The predicted spectrogram. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None spectrogram: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqTSModelOutput(ModelOutput): """ Base class for time series model's encoder outputs that also contains pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None @dataclass class Seq2SeqTSPredictionOutput(ModelOutput): """ Base class for time series model's decoder outputs that also contain the loss as well as the parameters of the chosen distribution. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): Distributional loss. params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): Parameters of the chosen distribution. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. 
""" loss: Optional[torch.FloatTensor] = None params: Optional[Tuple[torch.FloatTensor]] = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None @dataclass class SampleTSPredictionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length)` or `(batch_size, num_samples, prediction_length, input_size)`): Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None @dataclass class MaskedImageModelingOutput(ModelOutput): """ Base class for outputs of masked image completion / in-painting models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Reconstruction loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed / completed images. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @property def logits(self): warnings.warn( "logits attribute is deprecated and will be removed in version 5 of Transformers." " Please use the reconstruction attribute to retrieve the final output instead.", FutureWarning, ) return self.reconstruction
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/time_series_utils.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Time series distributional output classes and utilities. """ from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class AffineTransformed(TransformedDistribution): def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): self.scale = 1.0 if scale is None else scale self.loc = 0.0 if loc is None else loc super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) @property def mean(self): """ Returns the mean of the distribution. """ return self.base_dist.mean * self.scale + self.loc @property def variance(self): """ Returns the variance of the distribution. """ return self.base_dist.variance * self.scale**2 @property def stddev(self): """ Returns the standard deviation of the distribution. """ return self.variance.sqrt() class ParameterProjection(nn.Module): def __init__( self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs ) -> None: super().__init__(**kwargs) self.args_dim = args_dim self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) self.domain_map = domain_map def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: params_unbounded = [proj(x) for proj in self.proj] return self.domain_map(*params_unbounded) class LambdaLayer(nn.Module): def __init__(self, function): super().__init__() self.function = function def forward(self, x, *args): return self.function(x, *args) class DistributionOutput: distribution_class: type in_features: int args_dim: Dict[str, int] def __init__(self, dim: int = 1) -> None: self.dim = dim self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} def _base_distribution(self, distr_args): if self.dim == 1: return self.distribution_class(*distr_args) else: return Independent(self.distribution_class(*distr_args), 1) def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution: distr = self._base_distribution(distr_args) if loc is None and scale is None: return distr else: return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) @property def event_shape(self) -> Tuple: r""" Shape of each individual event contemplated by the distributions that this object constructs. """ return () if self.dim == 1 else (self.dim,) @property def event_dim(self) -> int: r""" Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object constructs. """ return len(self.event_shape) @property def value_in_support(self) -> float: r""" A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. 
By default 0.0. This value will be used when padding data series. """ return 0.0 def get_parameter_projection(self, in_features: int) -> nn.Module: r""" Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. """ return ParameterProjection( in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), ) def domain_map(self, *args: torch.Tensor): r""" Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a distribution of the right event_shape. """ raise NotImplementedError() @staticmethod def squareplus(x: torch.Tensor) -> torch.Tensor: r""" Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: https://twitter.com/jon_barron/status/1387167648669048833 """ return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 class StudentTOutput(DistributionOutput): """ Student-T distribution output class. """ args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} distribution_class: type = StudentT @classmethod def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) df = 2.0 + cls.squareplus(df) return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) class NormalOutput(DistributionOutput): """ Normal distribution output class. """ args_dim: Dict[str, int] = {"loc": 1, "scale": 1} distribution_class: type = Normal @classmethod def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) return loc.squeeze(-1), scale.squeeze(-1) class NegativeBinomialOutput(DistributionOutput): """ Negative Binomial distribution output class. """ args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} distribution_class: type = NegativeBinomial @classmethod def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): total_count = cls.squareplus(total_count) return total_count.squeeze(-1), logits.squeeze(-1) def _base_distribution(self, distr_args) -> Distribution: total_count, logits = distr_args if self.dim == 1: return self.distribution_class(total_count=total_count, logits=logits) else: return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) # Overwrites the parent class method. We cannot scale using the affine # transformation since negative binomial should return integers. Instead # we scale the parameters. def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None ) -> Distribution: total_count, logits = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits))
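# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): wiring a
# `DistributionOutput` head onto decoder features. The projection layer maps
# hidden states to raw parameters, `domain_map` constrains them to the valid
# domain (positive scale, df > 2), and `distribution()` optionally rescales
# with the loc/scale produced by input normalization. All shapes and the
# loc/scale values below are arbitrary example values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    batch_size, seq_len, hidden_size = 4, 10, 32

    distr_output = StudentTOutput(dim=1)
    param_proj = distr_output.get_parameter_projection(in_features=hidden_size)

    decoder_features = torch.randn(batch_size, seq_len, hidden_size)
    distr_args = param_proj(decoder_features)  # (df, loc, scale), each of shape (batch_size, seq_len)

    # In the time series models, loc/scale would come from the scaler applied
    # to the context window; here they are placeholders.
    loc = torch.zeros(batch_size, 1)
    scale = torch.ones(batch_size, 1)
    distribution = distr_output.distribution(distr_args, loc=loc, scale=scale)

    future_values = torch.randn(batch_size, seq_len)
    negative_log_likelihood = -distribution.log_prob(future_values).mean()
    samples = distribution.sample((100,))  # (100, batch_size, seq_len)
    print(negative_log_likelihood.item(), samples.shape)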
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/training_args_tf.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Optional, Tuple from .training_args import TrainingArguments from .utils import cached_property, is_tf_available, logging, requires_backends logger = logging.get_logger(__name__) if is_tf_available(): import tensorflow as tf from .modeling_tf_utils import keras @dataclass class TFTrainingArguments(TrainingArguments): """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: output_dir (`str`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (`bool`, *optional*, defaults to `False`): If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_eval (`bool`, *optional*): Whether to run evaluation on the validation set or not. Will be set to `True` if `eval_strategy` is different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_predict (`bool`, *optional*, defaults to `False`): Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. eval_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `eval_steps`. - `"epoch"`: Evaluation is done at the end of each epoch. per_device_train_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for training. per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. 
Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for Adam. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero). adam_beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the Adam optimizer. adam_beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the Adam optimizer. adam_epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the Adam optimizer. max_grad_norm (`float`, *optional*, defaults to 1.0): Maximum gradient norm (for gradient clipping). num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform. max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. logging_dir (`str`, *optional*): [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to *runs/**CURRENT_DATETIME_HOSTNAME***. logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. logging_first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. logging_steps (`int`, *optional*, defaults to 500): Number of update steps between two logs if `logging_strategy="steps"`. save_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. save_steps (`int`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `save_strategy="steps"`. save_total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. no_cuda (`bool`, *optional*, defaults to `False`): Whether to not use CUDA even when it is available or not. seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. fp16 (`bool`, *optional*, defaults to `False`): Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training. fp16_opt_level (`str`, *optional*, defaults to 'O1'): For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on the [Apex documentation](https://nvidia.github.io/apex/amp). local_rank (`int`, *optional*, defaults to -1): During distributed training, the rank of the process. tpu_num_cores (`int`, *optional*): When training on TPU, the number of TPU cores (automatically passed by launcher script). 
debug (`bool`, *optional*, defaults to `False`): Whether to activate the trace to record computation graphs and profiling information or not. dataloader_drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. eval_steps (`int`, *optional*, defaults to 1000): Number of update steps before two evaluations. past_index (`int`, *optional*, defaults to -1): Some models like [TransformerXL](../model_doc/transformerxl) or :doc*XLNet <../model_doc/xlnet>* can make use of the past hidden states for their predictions. If this argument is set to a positive int, the `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument `mems`. tpu_name (`str`, *optional*): The name of the TPU the process is running on. tpu_zone (`str`, *optional*): The zone of the TPU the process is running on. If not specified, we will attempt to automatically detect from metadata. gcp_project (`str`, *optional*): Google Cloud Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect from metadata. run_name (`str`, *optional*): A descriptor for the run. Notably used for wandb logging. xla (`bool`, *optional*): Whether to activate the XLA compilation or not. """ framework = "tf" tpu_name: Optional[str] = field( default=None, metadata={"help": "Name of TPU"}, ) tpu_zone: Optional[str] = field( default=None, metadata={"help": "Zone of TPU"}, ) gcp_project: Optional[str] = field( default=None, metadata={"help": "Name of Cloud TPU-enabled project"}, ) poly_power: float = field( default=1.0, metadata={"help": "Power for the Polynomial decay LR scheduler."}, ) xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"}) @cached_property def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: requires_backends(self, ["tf"]) logger.info("Tensorflow: setting up strategy") gpus = tf.config.list_physical_devices("GPU") # Set to float16 at first if self.fp16: keras.mixed_precision.set_global_policy("mixed_float16") if self.no_cuda: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") else: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( self.tpu_name, zone=self.tpu_zone, project=self.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: if self.tpu_name: raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!") else: tpu = None if tpu: # Set to bfloat16 in case of TPU if self.fp16: keras.mixed_precision.set_global_policy("mixed_bfloat16") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu) elif len(gpus) == 0: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") elif len(gpus) == 1: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") elif len(gpus) > 1: # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` strategy = tf.distribute.MirroredStrategy() else: raise ValueError("Cannot find the proper strategy, please check your environment properties.") return strategy @property def strategy(self) -> "tf.distribute.Strategy": """ The strategy used for distributed training. 
""" requires_backends(self, ["tf"]) return self._setup_strategy @property def n_replicas(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) return self._setup_strategy.num_replicas_in_sync @property def should_log(self): """ Whether or not the current process should produce log. """ return False # TF Logging is handled by Keras not the Trainer @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * self.n_replicas @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * self.n_replicas @property def n_gpu(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) warnings.warn( "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.", FutureWarning, ) return self._setup_strategy.num_replicas_in_sync
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/trainer_callback.py
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Callbacks to use with the Trainer class and customize the training loop. """ import copy import dataclasses import json from dataclasses import dataclass from typing import Dict, List, Optional, Union import numpy as np from tqdm.auto import tqdm from .trainer_utils import IntervalStrategy, has_length from .training_args import TrainingArguments from .utils import logging logger = logging.get_logger(__name__) @dataclass class TrainerState: """ A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing and passed to the [`TrainerCallback`]. <Tip> In all this class, one step is to be understood as one update step. When using gradient accumulation, one update step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update step requires going through *n* batches. </Tip> Args: epoch (`float`, *optional*): Only set during training, will represent the epoch the training is at (the decimal part being the percentage of the current epoch completed). global_step (`int`, *optional*, defaults to 0): During training, represents the number of update steps completed. max_steps (`int`, *optional*, defaults to 0): The number of update steps to do during the current training. logging_steps (`int`, *optional*, defaults to 500): Log every X updates steps eval_steps (`int`, *optional*): Run an evaluation every X steps. save_steps (`int`, *optional*, defaults to 500): Save checkpoint every X updates steps. train_batch_size (`int`, *optional*): The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used. num_input_tokens_seen (`int`, *optional*, defaults to 0): The number of tokens seen during training (number of input tokens, not the number of prediction tokens). total_flos (`float`, *optional*, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as floats to avoid overflow). log_history (`List[Dict[str, float]]`, *optional*): The list of logs done since the beginning of training. best_metric (`float`, *optional*): When tracking the best model, the value of the best metric encountered so far. best_model_checkpoint (`str`, *optional*): When tracking the best model, the value of the name of the checkpoint for the best model encountered so far. is_local_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. is_world_process_zero (`bool`, *optional*, defaults to `True`): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be `True` for one process). 
is_hyper_param_search (`bool`, *optional*, defaults to `False`): Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will impact the way data will be logged in TensorBoard. stateful_callbacks (`List[StatefulTrainerCallback]`, *optional*): Callbacks attached to the `Trainer` that should have their states saved or restored. Relevant callbacks should implement a `state` and `from_state` function. """ epoch: Optional[float] = None global_step: int = 0 max_steps: int = 0 logging_steps: int = 500 eval_steps: int = 500 save_steps: int = 500 train_batch_size: int = None num_train_epochs: int = 0 num_input_tokens_seen: int = 0 total_flos: float = 0 log_history: List[Dict[str, float]] = None best_metric: Optional[float] = None best_model_checkpoint: Optional[str] = None is_local_process_zero: bool = True is_world_process_zero: bool = True is_hyper_param_search: bool = False trial_name: str = None trial_params: Dict[str, Union[str, float, int, bool]] = None stateful_callbacks: List["TrainerCallback"] = None def __post_init__(self): if self.log_history is None: self.log_history = [] if self.stateful_callbacks is None: self.stateful_callbacks = {} elif isinstance(self.stateful_callbacks, dict): # We are loading the callbacks in from the state file, no need to process them pass else: # Saveable callbacks get stored as dict of kwargs stateful_callbacks = {} for callback in self.stateful_callbacks: if not isinstance(callback, (ExportableState)): raise TypeError( f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}" ) name = callback.__class__.__name__ if name in stateful_callbacks: # We can have multiple versions of the same callback # if so, we store them as a list of states to restore if not isinstance(stateful_callbacks[name], list): stateful_callbacks[name] = [stateful_callbacks[name]] stateful_callbacks[name].append(callback.state()) else: stateful_callbacks[name] = callback.state() self.stateful_callbacks = stateful_callbacks def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n" with open(json_path, "w", encoding="utf-8") as f: f.write(json_string) @classmethod def load_from_json(cls, json_path: str): """Create an instance from the content of `json_path`.""" with open(json_path, "r", encoding="utf-8") as f: text = f.read() return cls(**json.loads(text)) class ExportableState: """ A class for objects that include the ability to have their state saved during `Trainer._save_checkpoint` and loaded back in during `Trainer._load_from_checkpoint`. These must implement a `state` function that gets called during the respective Trainer function call. It should only include parameters and attributes needed to recreate the state at a particular time, to avoid utilizing pickle/maintain standard file IO writing. Example: ```python class EarlyStoppingCallback(TrainerCallback, ExportableState): def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. 
self.early_stopping_patience_counter = 0 def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, } } ```""" def state(self) -> dict: raise NotImplementedError("You must implement a `state` function to utilize this class.") @classmethod def from_state(cls, state): instance = cls(**state["args"]) for k, v in state["attributes"].items(): setattr(instance, k, v) return instance @dataclass class TrainerControl(ExportableState): """ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. """ should_training_stop: bool = False should_epoch_stop: bool = False should_save: bool = False should_evaluate: bool = False should_log: bool = False def _new_training(self): """Internal method that resets the variable for a new training.""" self.should_training_stop = False def _new_epoch(self): """Internal method that resets the variable for a new epoch.""" self.should_epoch_stop = False def _new_step(self): """Internal method that resets the variable for a new step.""" self.should_save = False self.should_evaluate = False self.should_log = False def state(self) -> dict: return { "args": { "should_training_stop": self.should_training_stop, "should_epoch_stop": self.should_epoch_stop, "should_save": self.should_save, "should_evaluate": self.should_evaluate, "should_log": self.should_log, }, "attributes": {}, } class TrainerCallback: # no-format """ A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. 
eval_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for evaluation. metrics (`Dict[str, float]`): The metrics computed by the last evaluation phase. Those are only accessible in the event `on_evaluate`. logs (`Dict[str, float]`): The values to log. Those are only accessible in the event `on_log`. The `control` object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The argument `args`, `state` and `control` are positionals for all events, all the others are grouped in `kwargs`. You can unpack the ones you need in the signature of the event using them. As an example, see the code of the simple [`~transformers.PrinterCallback`]. Example: ```python class PrinterCallback(TrainerCallback): def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) ```""" def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of the initialization of the [`Trainer`]. """ pass def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of training. """ pass def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of training. """ pass def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of an epoch. """ pass def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an epoch. """ pass def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the beginning of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of an substep during gradient accumulation. """ pass def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called at the end of a training step. If using gradient accumulation, one training step might take several inputs. """ pass def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after an evaluation phase. """ pass def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs): """ Event called after a successful prediction. """ pass def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a checkpoint save. """ pass def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after logging the last logs. """ pass def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): """ Event called after a prediction step. 
""" pass class CallbackHandler(TrainerCallback): """Internal class that just calls the list of callbacks in order.""" def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.tokenizer = tokenizer self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_dataloader = None self.eval_dataloader = None if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks): logger.warning( "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n" + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of" + "callbacks is\n:" + self.callback_list ) def add_callback(self, callback): cb = callback() if isinstance(callback, type) else callback cb_class = callback if isinstance(callback, type) else callback.__class__ if cb_class in [c.__class__ for c in self.callbacks]: logger.warning( f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current" + "list of callbacks is\n:" + self.callback_list ) self.callbacks.append(cb) def pop_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return cb else: for cb in self.callbacks: if cb == callback: self.callbacks.remove(cb) return cb def remove_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return else: self.callbacks.remove(callback) @property def callback_list(self): return "\n".join(cb.__class__.__name__ for cb in self.callbacks) def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_init_end", args, state, control) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_training_stop = False return self.call_event("on_train_begin", args, state, control) def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_train_end", args, state, control) def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_epoch_stop = False return self.call_event("on_epoch_begin", args, state, control) def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_epoch_end", args, state, control) def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_log = False control.should_evaluate = False control.should_save = False return self.call_event("on_step_begin", args, state, control) def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_substep_end", args, state, control) def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_step_end", args, state, control) def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): control.should_evaluate = False return self.call_event("on_evaluate", args, state, control, metrics=metrics) def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): return self.call_event("on_predict", args, state, control, metrics=metrics) def on_save(self, args: 
TrainingArguments, state: TrainerState, control: TrainerControl): control.should_save = False return self.call_event("on_save", args, state, control) def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs): control.should_log = False return self.call_event("on_log", args, state, control, logs=logs) def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_prediction_step", args, state, control) def call_event(self, event, args, state, control, **kwargs): for callback in self.callbacks: result = getattr(callback, event)( args, state, control, model=self.model, tokenizer=self.tokenizer, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, train_dataloader=self.train_dataloader, eval_dataloader=self.eval_dataloader, **kwargs, ) # A Callback can skip the return of `control` if it doesn't change it. if result is not None: control = result return control class DefaultFlowCallback(TrainerCallback): """ A [`TrainerCallback`] that handles the default flow of the training loop for logs, evaluation and checkpoints. """ def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if state.global_step == 1 and args.logging_first_step: control.should_log = True if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0: control.should_log = True # Evaluate if ( args.eval_strategy == IntervalStrategy.STEPS and state.global_step % state.eval_steps == 0 and args.eval_delay <= state.global_step ): control.should_evaluate = True # Save if ( args.save_strategy == IntervalStrategy.STEPS and state.save_steps > 0 and state.global_step % state.save_steps == 0 ): control.should_save = True # End training if state.global_step >= state.max_steps: control.should_training_stop = True return control def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # Log if args.logging_strategy == IntervalStrategy.EPOCH: control.should_log = True # Evaluate if args.eval_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch: control.should_evaluate = True # Save if args.save_strategy == IntervalStrategy.EPOCH: control.should_save = True return control class ProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation. 
""" def __init__(self): self.training_bar = None self.prediction_bar = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm( total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True ) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: # avoid modifying the logs object as it is shared between callbacks logs = copy.deepcopy(logs) _ = logs.pop("total_flos", None) # round numbers so that it looks better in console if "epoch" in logs: logs["epoch"] = round(logs["epoch"], 2) self.training_bar.write(str(logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.close() self.training_bar = None class PrinterCallback(TrainerCallback): """ A bare [`TrainerCallback`] that just prints the logs. """ def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) class EarlyStoppingCallback(TrainerCallback, ExportableState): """ A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. """ def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. 
self.early_stopping_patience_counter = 0 def check_metric_value(self, args, state, control, metric_value): # best_metric is set by code for load_best_model operator = np.greater if args.greater_is_better else np.less if state.best_metric is None or ( operator(metric_value, state.best_metric) and abs(metric_value - state.best_metric) > self.early_stopping_threshold ): self.early_stopping_patience_counter = 0 else: self.early_stopping_patience_counter += 1 def on_train_begin(self, args, state, control, **kwargs): assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True" assert ( args.metric_for_best_model is not None ), "EarlyStoppingCallback requires metric_for_best_model is defined" assert ( args.eval_strategy != IntervalStrategy.NO ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch" def on_evaluate(self, args, state, control, metrics, **kwargs): metric_to_check = args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics.get(metric_to_check) if metric_value is None: logger.warning( f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping" " is disabled" ) return self.check_metric_value(args, state, control, metric_value) if self.early_stopping_patience_counter >= self.early_stopping_patience: control.should_training_stop = True def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, }, }
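To show how the callback events and the `TrainerControl` switches described above fit together, here is a small hypothetical callback (the class name and the improvement criterion are made up for the example) that requests a checkpoint save whenever `eval_loss` improves:

```python
from transformers import TrainerCallback


class SaveOnImprovementCallback(TrainerCallback):
    """Hypothetical example: flag a checkpoint save whenever `eval_loss` improves."""

    def __init__(self):
        self.best_loss = None

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        loss = (metrics or {}).get("eval_loss")
        if loss is not None and (self.best_loss is None or loss < self.best_loss):
            self.best_loss = loss
            control.should_save = True  # picked up by the Trainer via the returned control
        return control


# Attach it to an existing Trainer instance:
# trainer.add_callback(SaveOnImprovementCallback())
```

Returning the (possibly modified) `control` object is what lets the `Trainer` act on the flags; a callback that does not change it can simply return `None`.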
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/training_args_seq2seq.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings logger = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__) class Seq2SeqTrainingArguments(TrainingArguments): """ Args: sortish_sampler (`bool`, *optional*, defaults to `False`): Whether to use a *sortish sampler* or not. Only possible if the underlying datasets are *Seq2SeqDataset* for now but will become generally available in the near future. It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set. predict_with_generate (`bool`, *optional*, defaults to `False`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). generation_max_length (`int`, *optional*): The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `max_length` value of the model configuration. generation_num_beams (`int`, *optional*): The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default to the `num_beams` value of the model configuration. generation_config (`str` or `Path` or [`~generation.GenerationConfig`], *optional*): Allows to load a [`~generation.GenerationConfig`] from the `from_pretrained` method. This can be either: - a string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. - a path to a *directory* containing a configuration file saved using the [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`. - a [`~generation.GenerationConfig`] object. """ sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) generation_max_length: Optional[int] = field( default=None, metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) }, ) generation_num_beams: Optional[int] = field( default=None, metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) }, ) generation_config: Optional[Union[str, Path, GenerationConfig]] = field( default=None, metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." }, ) def to_dict(self): """ Serializes this instance while replace `Enum` by their values and `GenerationConfig` by dictionaries (for JSON serialization support). It obfuscates the token values by removing their value. 
""" # filter out fields that are defined as field(init=False) d = super().to_dict() for k, v in d.items(): if isinstance(v, GenerationConfig): d[k] = v.to_dict() return d
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/feature_extraction_utils.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extraction saving/loading class for common feature extractors. """ import copy import json import os import warnings from collections import UserDict from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union import numpy as np from .dynamic_module_utils import custom_object_save from .utils import ( FEATURE_EXTRACTOR_NAME, PushToHubMixin, TensorType, add_model_info_to_auto_map, cached_file, copy_func, download_url, is_flax_available, is_jax_tensor, is_numpy_array, is_offline_mode, is_remote_url, is_tf_available, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends, ) if TYPE_CHECKING: if is_torch_available(): import torch # noqa logger = logging.get_logger(__name__) PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821 class BatchFeature(UserDict): r""" Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods. This class is derived from a python dictionary and can be used as a dictionary. Args: data (`dict`, *optional*): Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask', etc.). tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. """ def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None): super().__init__(data) self.convert_to_tensors(tensor_type=tensor_type) def __getitem__(self, item: str) -> Union[Any]: """ If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask', etc.). """ if isinstance(item, str): return self.data[item] else: raise KeyError("Indexing with integers is not available when using Python based feature extractors") def __getattr__(self, item: str): try: return self.data[item] except KeyError: raise AttributeError def __getstate__(self): return {"data": self.data} def __setstate__(self, state): if "data" in state: self.data = state["data"] # Copied from transformers.tokenization_utils_base.BatchEncoding.keys def keys(self): return self.data.keys() # Copied from transformers.tokenization_utils_base.BatchEncoding.values def values(self): return self.data.values() # Copied from transformers.tokenization_utils_base.BatchEncoding.items def items(self): return self.data.items() def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None): if tensor_type is None: return None, None # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." 
) import tensorflow as tf as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch # noqa def as_tensor(value): if isinstance(value, (list, tuple)) and len(value) > 0 and isinstance(value[0], np.ndarray): value = np.array(value) return torch.tensor(value) is_tensor = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 as_tensor = jnp.array is_tensor = is_jax_tensor else: def as_tensor(value, dtype=None): if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): value_lens = [len(val) for val in value] if len(set(value_lens)) > 1 and dtype is None: # we have a ragged list so handle explicitly value = as_tensor([np.asarray(val) for val in value], dtype=object) return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array return is_tensor, as_tensor def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. """ if tensor_type is None: return self is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type) # Do the tensor conversion in batch for key, value in self.items(): try: if not is_tensor(value): tensor = as_tensor(value) self[key] = tensor except: # noqa E722 if key == "overflowing_values": raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") raise ValueError( "Unable to create tensor, you should probably activate padding " "with 'padding=True' to have batched tensors with the same length." ) return self def to(self, *args, **kwargs) -> "BatchFeature": """ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in different `dtypes` and sending the `BatchFeature` to a different `device`. Args: args (`Tuple`): Will be passed to the `to(...)` function of the tensors. kwargs (`Dict`, *optional*): Will be passed to the `to(...)` function of the tensors. Returns: [`BatchFeature`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch # noqa new_data = {} device = kwargs.get("device") # Check if the args are a device or a dtype if device is None and len(args) > 0: # device should be always the first argument arg = args[0] if is_torch_dtype(arg): # The first argument is a dtype pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: # it's something else raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.") # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` for k, v in self.items(): # check if v is a floating point if torch.is_floating_point(v): # cast and send to device new_data[k] = v.to(*args, **kwargs) elif device is not None: new_data[k] = v.to(device=device) else: new_data[k] = v self.data = new_data return self class FeatureExtractionMixin(PushToHubMixin): """ This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors. 
""" _auto_class = None def __init__(self, **kwargs): """Set elements of `kwargs` as attributes.""" # Pop "processor_class" as it should be saved as private attribute self._processor_class = kwargs.pop("processor_class", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r""" Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a derived class of [`SequenceFeatureExtractor`]. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a feature extractor file saved using the [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final feature extractor object. If `True`, then this functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. kwargs (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. 
Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is controlled by the `return_unused_kwargs` keyword parameter. Returns: A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]. Examples: ```python # We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a # derived class: *Wav2Vec2FeatureExtractor* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h" ) # Download feature_extraction_config from huggingface.co and cache. feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "./test/saved_model/" ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False ) assert feature_extractor.return_attention_mask is False feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True ) assert feature_extractor.return_attention_mask is False assert unused_kwargs == {"foo": False} ```""" kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) return cls.from_dict(feature_extractor_dict, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=self) # If we save using the predefined names, we can load using `from_pretrained` output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME) self.to_json_file(output_feature_extractor_file) logger.info(f"Feature extractor saved in {output_feature_extractor_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) return [output_feature_extractor_file] @classmethod def get_feature_extractor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. Returns: `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object. """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) subfolder = kwargs.pop("subfolder", None) token = kwargs.pop("token", None) use_auth_token = kwargs.pop("use_auth_token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME) if os.path.isfile(pretrained_model_name_or_path): resolved_feature_extractor_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): feature_extractor_file = pretrained_model_name_or_path resolved_feature_extractor_file = download_url(pretrained_model_name_or_path) else: feature_extractor_file = FEATURE_EXTRACTOR_NAME try: # Load from local folder or from cache or download from model Hub and cache resolved_feature_extractor_file = cached_file( pretrained_model_name_or_path, feature_extractor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, subfolder=subfolder, token=token, user_agent=user_agent, revision=revision, ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load" " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {FEATURE_EXTRACTOR_NAME} file" ) try: # Load feature_extractor dict with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader: text = reader.read() feature_extractor_dict = json.loads(text) except json.JSONDecodeError: raise EnvironmentError( f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_feature_extractor_file}") else: logger.info( f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}" ) if "auto_map" in feature_extractor_dict and not is_local: feature_extractor_dict["auto_map"] = add_model_info_to_auto_map( feature_extractor_dict["auto_map"], pretrained_model_name_or_path ) return feature_extractor_dict, kwargs @classmethod def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor: """ Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of parameters. Args: feature_extractor_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the feature extractor object. 
Returns: [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those parameters. """ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Update feature_extractor with kwargs if needed to_remove = [] for key, value in kwargs.items(): if key in feature_extractor_dict: feature_extractor_dict[key] = value to_remove.append(key) for key in to_remove: kwargs.pop(key, None) feature_extractor = cls(**feature_extractor_dict) logger.info(f"Feature extractor {feature_extractor}") if return_unused_kwargs: return feature_extractor, kwargs else: return feature_extractor def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output["feature_extractor_type"] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "window" in output: del output["window"] return output @classmethod def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor: """ Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor object instantiated from that JSON file. """ with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() feature_extractor_dict = json.loads(text) return cls(**feature_extractor_dict) def to_json_string(self) -> str: """ Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. """ dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this feature_extractor instance's parameters will be saved. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" @classmethod def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"): """ Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoFeatureExtractor`. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`): The auto class to register this new feature extractor with. 
""" if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub) if FeatureExtractionMixin.push_to_hub.__doc__ is not None: FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format( object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file" )
0
mavonic_private_repos/transformers/src
mavonic_private_repos/transformers/src/transformers/optimization.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import warnings from functools import partial from typing import Callable, Iterable, Optional, Tuple, Union import torch from torch import nn from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau from .trainer_pt_utils import LayerWiseDummyOptimizer, LayerWiseDummyScheduler from .trainer_utils import SchedulerType from .utils import logging from .utils.versions import require_version logger = logging.get_logger(__name__) def _get_constant_lambda(_=None): return 1 def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch) def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs): """ Create a schedule with a constant learning rate that decreases when a metric has stopped improving. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. kwargs (`dict`, *optional*): Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau` for possible parameters. Return: `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule. """ return ReduceLROnPlateau(optimizer, **kwargs) def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps) return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_lambda = partial( _get_linear_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_cosine_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`float`, *optional*, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" lr_lambda = partial( _get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. num_cycles (`int`, *optional*, defaults to 1): The number of hard restarts to use. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_lambda = partial( _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_polynomial_decay_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, lr_end: float, power: float, lr_init: int, ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init def get_polynomial_decay_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ): """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. lr_end (`float`, *optional*, defaults to 1e-7): The end LR. power (`float`, *optional*, defaults to 1.0): Power factor. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. 
Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_init = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})") lr_lambda = partial( _get_polynomial_decay_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, lr_end=lr_end, power=power, lr_init=lr_init, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) shift = timescale - num_warmup_steps decay = 1.0 / math.sqrt((current_step + shift) / timescale) return decay def get_inverse_sqrt_schedule( optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1 ): """ Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a warmup period which increases lr linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. timescale (`int`, *optional*, defaults to `num_warmup_steps`): Time scale. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ # Note: this implementation is adapted from # https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930 if timescale is None: timescale = num_warmup_steps or 10_000 lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale) return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def _get_cosine_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, min_lr_rate: float = 0.0 ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)) factor = factor * (1 - min_lr_rate) + min_lr_rate return max(0, factor) def get_cosine_with_min_lr_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1, min_lr: float = None, min_lr_rate: float = None, ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. 
num_cycles (`float`, *optional*, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. min_lr (`float`, *optional*): The minimum learning rate to reach after the cosine schedule. min_lr_rate (`float`, *optional*): The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ if min_lr is not None and min_lr_rate is not None: raise ValueError("Only one of min_lr or min_lr_rate should be set") elif min_lr is not None: min_lr_rate = min_lr / optimizer.defaults["lr"] elif min_lr_rate is None: raise ValueError("One of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`") lr_lambda = partial( _get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, min_lr_rate=min_lr_rate, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def _get_wsd_scheduler_lambda( current_step: int, *, num_warmup_steps: int, num_stable_steps: int, num_decay_steps: int, num_cycles: float, min_lr_ratio: float, ): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) if current_step < num_warmup_steps + num_stable_steps: return 1.0 if current_step < num_warmup_steps + num_stable_steps + num_decay_steps: progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps)) value = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return (1.0 - min_lr_ratio) * value + min_lr_ratio return min_lr_ratio def get_wsd_schedule( optimizer: Optimizer, num_warmup_steps: int, num_stable_steps: int, num_decay_steps: int, min_lr_ratio: float = 0, num_cycles: float = 0.5, last_epoch: int = -1, ): """ Create a schedule with a learning rate that has three stages: 1. linear increase from 0 to initial lr. 2. constant lr (equal to initial lr). 3. decrease following the values of the cosine function between the initial lr set in the optimizer to a fraction of initial lr. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_stable_steps (`int`): The number of steps for the stable phase. num_decay_steps (`int`): The number of steps for the cosine annealing phase. min_lr_ratio (`float`, *optional*, defaults to 0): The minimum learning rate as a ratio of the initial learning rate. num_cycles (`float`, *optional*, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" lr_lambda = partial( _get_wsd_scheduler_lambda, num_warmup_steps=num_warmup_steps, num_stable_steps=num_stable_steps, num_decay_steps=num_decay_steps, min_lr_ratio=min_lr_ratio, num_cycles=num_cycles, ) return LambdaLR(optimizer, lr_lambda, last_epoch) TYPE_TO_SCHEDULER_FUNCTION = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule, SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule, SchedulerType.COSINE_WITH_MIN_LR: get_cosine_with_min_lr_schedule_with_warmup, SchedulerType.WARMUP_STABLE_DECAY: get_wsd_schedule, } def get_scheduler( name: Union[str, SchedulerType], optimizer: Optimizer, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, scheduler_specific_kwargs: Optional[dict] = None, ): """ Unified API to get any scheduler from its name. Args: name (`str` or `SchedulerType`): The name of the scheduler to use. optimizer (`torch.optim.Optimizer`): The optimizer that will be used during training. num_warmup_steps (`int`, *optional*): The number of warmup steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. num_training_steps (`int``, *optional*): The number of training steps to do. This is not required by all schedulers (hence the argument being optional), the function will raise an error if it's unset and the scheduler type requires it. scheduler_specific_kwargs (`dict`, *optional*): Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler parameters will cause the scheduler function to raise a TypeError. 
""" name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] # If a `LayerWiseDummyOptimizer` is passed we extract the optimizer dict and # recursively call `get_scheduler` to get the proper schedulers on each parameter if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer): optimizer_dict = optimizer.optimizer_dict scheduler_dict = {} for param in optimizer_dict.keys(): scheduler_dict[param] = get_scheduler( name, optimizer=optimizer_dict[param], num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, ) def scheduler_hook(param): # Since the optimizer hook has been already attached we only need to # attach the scheduler hook, the gradients have been zeroed here scheduler_dict[param].step() for param in optimizer_dict.keys(): if param.requires_grad: param.register_post_accumulate_grad_hook(scheduler_hook) return LayerWiseDummyScheduler() if name == SchedulerType.CONSTANT: return schedule_func(optimizer) if scheduler_specific_kwargs is None: scheduler_specific_kwargs = {} if name == SchedulerType.REDUCE_ON_PLATEAU: return schedule_func(optimizer, **scheduler_specific_kwargs) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) if name == SchedulerType.INVERSE_SQRT: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") return schedule_func( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **scheduler_specific_kwargs, ) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay Regularization](https://arxiv.org/abs/1711.05101). Parameters: params (`Iterable[nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (`float`, *optional*, defaults to 0.001): The learning rate to use. betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`): Adam's betas parameters (b1, b2). eps (`float`, *optional*, defaults to 1e-06): Adam's epsilon for numerical stability. weight_decay (`float`, *optional*, defaults to 0.0): Decoupled weight decay to apply. correct_bias (`bool`, *optional*, defaults to `True`): Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`). no_deprecation_warning (`bool`, *optional*, defaults to `False`): A flag used to disable the deprecation warning (set to `True` to disable the warning). """ def __init__( self, params: Iterable[nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, no_deprecation_warning: bool = False, ): if not no_deprecation_warning: warnings.warn( "This implementation of AdamW is deprecated and will be removed in a future version. 
Use the PyTorch" " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this" " warning", FutureWarning, ) require_version("torch>=1.5.0") # add_ with alpha if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0") defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias} super().__init__(params, defaults) @torch.no_grad() def step(self, closure: Callable = None): """ Performs a single optimization step. Arguments: closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1)) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.add_(p, alpha=(-group["lr"] * group["weight_decay"])) return loss class Adafactor(Optimizer): """ AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (`Iterable[nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (`float`, *optional*): The external learning rate. 
eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`): Regularization constants for square gradient and parameter scale respectively clip_threshold (`float`, *optional*, defaults to 1.0): Threshold of root mean square of final gradient update decay_rate (`float`, *optional*, defaults to -0.8): Coefficient used to compute running averages of square beta1 (`float`, *optional*): Coefficient used for computing running averages of gradient weight_decay (`float`, *optional*, defaults to 0.0): Weight decay (L2 penalty) scale_parameter (`bool`, *optional*, defaults to `True`): If True, learning rate is scaled by root mean square relative_step (`bool`, *optional*, defaults to `True`): If True, time-dependent learning rate is computed instead of external learning rate warmup_init (`bool`, *optional*, defaults to `False`): Time-dependent learning rate computation depends on whether warm-up initialization is being used This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3): - Training without LR warmup or clip_threshold is not recommended. - use scheduled LR warm-up to fixed LR - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235) - Disable relative updates - Use scale_parameter=False - Additional optimizer operations like gradient clipping should not be used alongside Adafactor Example: ```python Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3) ``` Others reported the following combination to work well: ```python Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) ``` When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`] scheduler as following: ```python from transformers.optimization import Adafactor, AdafactorSchedule optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(..., optimizers=(optimizer, lr_scheduler)) ``` Usage: ```python # replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, ) ```""" def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): require_version("torch>=1.5.0") # add_ with alpha if lr is not None and relative_step: raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") if warmup_init and not relative_step: raise ValueError("`warmup_init=True` requires `relative_step=True`") defaults = { "lr": lr, "eps": eps, "clip_threshold": clip_threshold, "decay_rate": decay_rate, "beta1": beta1, "weight_decay": weight_decay, "scale_parameter": scale_parameter, "relative_step": relative_step, "warmup_init": warmup_init, } super().__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return 
param_scale * rel_step_sz @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) @staticmethod def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): # copy from fairseq's adafactor implementation: # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505 r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) lr = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad**2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) update.mul_(lr) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr)) p_data_fp32.add_(-update) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_data_fp32) return loss class AdafactorSchedule(LambdaLR): """ Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g., for logging), this class creates a proxy object that retrieves the current lr values from the optimizer. 
It returns `initial_lr` during startup and the actual `lr` during stepping. """ def __init__(self, optimizer, initial_lr=0.0): def lr_lambda(_): return initial_lr for group in optimizer.param_groups: group["initial_lr"] = initial_lr super().__init__(optimizer, lr_lambda) for group in optimizer.param_groups: del group["initial_lr"] def get_lr(self): opt = self.optimizer lrs = [ opt._get_lr(group, opt.state[group["params"][0]]) for group in opt.param_groups if group["params"][0].grad is not None ] if len(lrs) == 0: lrs = self.base_lrs # if called before stepping return lrs def get_adafactor_schedule(optimizer, initial_lr=0.0): """ Get a proxy schedule for [`~optimization.Adafactor`] Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. initial_lr (`float`, *optional*, defaults to 0.0): Initial lr Return: [`~optimization.Adafactor`] proxy schedule object. """ return AdafactorSchedule(optimizer, initial_lr)
0
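The scheduler factories above all return standard `LambdaLR` (or `ReduceLROnPlateau`) objects, so they drop straight into an ordinary PyTorch training loop. A minimal sketch, assuming a small `nn.Linear` stand-in for a real model and illustrative step counts:

```python
import torch
from torch import nn

from transformers import get_scheduler

# Toy model and optimizer; in practice these come from your training setup.
model = nn.Linear(16, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

num_training_steps = 1000  # illustrative value
num_warmup_steps = 100     # illustrative value

# "linear" dispatches to get_linear_schedule_with_warmup; "cosine",
# "constant_with_warmup", etc. map to the other factories defined above.
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=num_warmup_steps,
    num_training_steps=num_training_steps,
)

for step in range(num_training_steps):
    loss = model(torch.randn(4, 16)).sum()  # placeholder loss
    loss.backward()
    optimizer.step()
    lr_scheduler.step()  # advance the schedule once per optimizer step
    optimizer.zero_grad()
```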
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_sentencepiece_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AlbertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BarthezTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BartphoTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BertGenerationTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BigBirdTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CamembertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CodeLlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CpmTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class DebertaV2Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ErnieMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class FNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GemmaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GPTSw3Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LayoutXLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class M2M100Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MarianTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBart50Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MLukeTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class NllbTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, 
["sentencepiece"]) class PegasusTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PLBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ReformerTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class RemBertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SeamlessM4TTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SiglipTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class Speech2TextTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SpeechT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class T5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class UdopTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XGLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMProphetNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMRobertaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"])
0
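These dummy classes exist so that top-level imports still succeed when the optional `sentencepiece` backend is missing; instantiating one raises an informative error instead of an opaque `ModuleNotFoundError`. A small sketch of the behaviour, assuming an environment where `sentencepiece` is *not* installed:

```python
# Without sentencepiece installed, `AlbertTokenizer` resolves to the dummy class above.
from transformers import AlbertTokenizer

try:
    tokenizer = AlbertTokenizer("spiece.model")  # illustrative vocab file path
except ImportError as err:
    # requires_backends raises an ImportError whose message explains how to
    # install the missing "sentencepiece" backend.
    print(err)
```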
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_tokenizers_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AlbertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BartTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BarthezTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BigBirdTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BlenderbotTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BlenderbotSmallTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BloomTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CamembertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CLIPTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CodeLlamaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CodeGenTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CohereTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class ConvBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CpmTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DebertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DebertaV2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RetriBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DistilBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRContextEncoderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRQuestionEncoderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRReaderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tokenizers"]) class ElectraTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class FNetTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class FunnelTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GemmaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPT2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPTNeoXTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPTNeoXJapaneseTokenizer(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class HerbertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMv2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMv3TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutXLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LEDTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LlamaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LongformerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LxmertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MarkupLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MBartTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MBart50TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MobileBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MPNetTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MT5TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MvpTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tokenizers"]) class NllbTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class NougatTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class OpenAIGPTTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class PegasusTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class Qwen2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RealmTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class ReformerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RemBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RobertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RoFormerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SeamlessM4TTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SplinterTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SqueezeBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class T5TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class UdopTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class WhisperTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XGLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XLMRobertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XLNetTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class PreTrainedTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/model_parallel_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import ceil def assert_device_map(device_map, num_blocks): blocks = list(range(0, num_blocks)) device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] # Duplicate check duplicate_blocks = [] for i in device_map_blocks: if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: duplicate_blocks.append(i) # Missing blocks missing_blocks = [i for i in blocks if i not in device_map_blocks] extra_blocks = [i for i in device_map_blocks if i not in blocks] if len(duplicate_blocks) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(duplicate_blocks) ) if len(missing_blocks) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(missing_blocks) ) if len(extra_blocks) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(extra_blocks) ) def get_device_map(n_layers, devices): """Returns a dictionary of layers distributed evenly across all devices.""" layers = list(range(n_layers)) n_blocks = int(ceil(n_layers / len(devices))) layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] return dict(zip(devices, layers_list))
0
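`get_device_map` simply chunks the layer indices evenly across the given devices, and `assert_device_map` validates that a (possibly hand-written) map covers every block exactly once. A minimal sketch with illustrative values:

```python
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

# Split 12 transformer blocks evenly across two devices.
device_map = get_device_map(n_layers=12, devices=[0, 1])
# -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}

# Raises ValueError if any block is duplicated, missing, or out of range.
assert_device_map(device_map, num_blocks=12)
```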
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/sentencepiece_model_pb2_new.py
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: sentencepiece_model.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _globals["_TRAINERSPEC"]._serialized_start = 45 _globals["_TRAINERSPEC"]._serialized_end = 1581 _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517 _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570 _globals["_NORMALIZERSPEC"]._serialized_start = 1584 _globals["_NORMALIZERSPEC"]._serialized_end = 1793 _globals["_SELFTESTDATA"]._serialized_start = 1795 _globals["_SELFTESTDATA"]._serialized_end = 1916 _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864 _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905 _globals["_MODELPROTO"]._serialized_start = 1919 _globals["_MODELPROTO"]._serialized_end = 2429 _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208 _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418 _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323 _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407 # @@protoc_insertion_point(module_scope)
0
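The generated module above exposes the `ModelProto` message, which can be used to inspect a SentencePiece `.model` file with standard protobuf calls. A minimal sketch, assuming a local `spiece.model` file (the path is illustrative):

```python
from transformers.utils import sentencepiece_model_pb2_new as sp_pb2

model = sp_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # illustrative path
    model.ParseFromString(f.read())

# Inspect a few fields defined by the schema serialized above.
print(model.trainer_spec.vocab_size)
print(model.normalizer_spec.name)
print(len(model.pieces))
```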
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class Pop2PianoFeatureExtractor(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) class Pop2PianoTokenizer(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) class Pop2PianoProcessor(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/quantization_config.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional, Union from packaging import version from ..utils import is_auto_awq_available, is_hqq_available, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) class QuantizationMethod(str, Enum): BITS_AND_BYTES = "bitsandbytes" GPTQ = "gptq" AWQ = "awq" AQLM = "aqlm" QUANTO = "quanto" EETQ = "eetq" HQQ = "hqq" class AWQLinearVersion(str, Enum): GEMM = "gemm" GEMV = "gemv" EXLLAMA = "exllama" @staticmethod def from_str(version: str): version = version.lower() if version == "gemm": return AWQLinearVersion.GEMM elif version == "gemv": return AWQLinearVersion.GEMV elif version == "exllama": return AWQLinearVersion.EXLLAMA else: raise ValueError(f"Unknown AWQLinearVersion {version}") class AwqBackendPackingMethod(str, Enum): AUTOAWQ = "autoawq" LLMAWQ = "llm-awq" @dataclass class QuantizationConfigMixin: """ Mixin class for quantization config """ quant_method: QuantizationMethod @classmethod def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs): """ Instantiates a [`QuantizationConfigMixin`] from a Python dictionary of parameters. Args: config_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. return_unused_kwargs (`bool`,*optional*, defaults to `False`): Whether or not to return a list of unused keyword arguments. Used for `from_pretrained` method in `PreTrainedModel`. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: [`QuantizationConfigMixin`]: The configuration object instantiated from those parameters. """ config = cls(**config_dict) to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) if return_unused_kwargs: return config, kwargs else: return config def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `QuantizationConfig()` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: config_dict = self.to_dict() json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" writer.write(json_string) def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. 
""" return copy.deepcopy(self.__dict__) def __iter__(self): """allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin""" for attr, value in copy.deepcopy(self.__dict__).items(): yield attr, value def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def to_json_string(self, use_diff: bool = True) -> str: """ Serializes this instance to a JSON string. Args: use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` is serialized to JSON string. Returns: `str`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def update(self, **kwargs): """ Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes, returning all the unused kwargs. Args: kwargs (`Dict[str, Any]`): Dictionary of attributes to tentatively update this class. Returns: `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. """ to_remove = [] for key, value in kwargs.items(): if hasattr(self, key): setattr(self, key, value) to_remove.append(key) # Remove all the attributes that were updated, without modifying the input dict unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} return unused_kwargs @dataclass class HqqConfig(QuantizationConfigMixin): """ This is wrapper around hqq's BaseQuantizeConfig. Args: nbits (`int`, *optional*, defaults to 4): Number of bits. Supported values are (8, 4, 3, 2, 1). group_size (`int`, *optional*, defaults to 64): Group-size value. Supported values are any value that is divisble by weight.shape[axis]). quant_zero (`bool`, *optional*, defaults to `True`): Quantize the zero-point if set to `True`. quant_scale (`bool`, *optional*, defaults to `False`): Quantize the scaling if set to `True`. offload_meta (`bool`, *optional*, defaults to `False`): Offload the meta-data to the CPU if set to `True`. view_as_float (`bool`, *optional*, defaults to `False`): View the quantized weight as float (used in distributed training) if set to `True`. axis (`int`, *optional*, defaults to 0): Axis along which grouping is performed. Supported values are 0 or 1. dynamic_config (dict, *optional*): Parameters for dynamic configuration. The key is the name tag of the layer and the value is a quantization config. If set, each layer specified by its id will use its dedicated quantization configuration. skip_modules (`List[str]`, *optional*, defaults to `['lm_head']`): List of `nn.Linear` layers to skip. kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. """ def __init__( self, nbits: int = 4, group_size: int = 64, quant_zero: bool = True, quant_scale: bool = False, offload_meta: bool = False, view_as_float: bool = False, axis: int = 0, dynamic_config: Optional[dict] = None, skip_modules: List[str] = ["lm_head"], **kwargs, ): if is_hqq_available(): from hqq.core.quantize import BaseQuantizeConfig as HQQBaseQuantizeConfig if axis not in [0, 1]: raise ValueError("Invalid axis value. 
Only 0 and 1 are allowed.") if dynamic_config is not None: self.quant_config = {} for key in dynamic_config: self.quant_config[key] = HQQBaseQuantizeConfig(**dynamic_config[key]) else: self.quant_config = HQQBaseQuantizeConfig( **{ "nbits": nbits, "group_size": group_size, "quant_zero": quant_zero, "quant_scale": quant_scale, "offload_meta": offload_meta, "view_as_float": view_as_float, "axis": axis, } ) self.quant_method = QuantizationMethod.HQQ self.skip_modules = skip_modules self.post_init() def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ pass def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ return self.quant_config def __repr__(self): config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = HqqConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict @dataclass class BitsAndBytesConfig(QuantizationConfigMixin): """ This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `bitsandbytes`. This replaces `load_in_8bit` or `load_in_4bit`therefore both options are mutually exclusive. Currently only supports `LLM.int8()`, `FP4`, and `NF4` quantization. If more methods are added to `bitsandbytes`, then more arguments will be added to this class. Args: load_in_8bit (`bool`, *optional*, defaults to `False`): This flag is used to enable 8-bit quantization with LLM.int8(). load_in_4bit (`bool`, *optional*, defaults to `False`): This flag is used to enable 4-bit quantization by replacing the Linear layers with FP4/NF4 layers from `bitsandbytes`. llm_int8_threshold (`float`, *optional*, defaults to 6.0): This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning). llm_int8_skip_modules (`List[str]`, *optional*): An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such as Jukebox that has several heads in different places and not necessarily at the last position. 
For example for `CausalLM` models, the last `lm_head` is kept in its original `dtype`. llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`): This flag is used for advanced use cases and users that are aware of this feature. If you want to split your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8 operations will not be run on CPU. llm_int8_has_fp16_weight (`bool`, *optional*, defaults to `False`): This flag runs LLM.int8() with 16-bit main weights. This is useful for fine-tuning as the weights do not have to be converted back and forth for the backward pass. bnb_4bit_compute_dtype (`torch.dtype` or str, *optional*, defaults to `torch.float32`): This sets the computational type which might be different than the input type. For example, inputs might be fp32, but computation can be set to bf16 for speedups. bnb_4bit_quant_type (`str`, *optional*, defaults to `"fp4"`): This sets the quantization data type in the bnb.nn.Linear4Bit layers. Options are FP4 and NF4 data types which are specified by `fp4` or `nf4`. bnb_4bit_use_double_quant (`bool`, *optional*, defaults to `False`): This flag is used for nested quantization where the quantization constants from the first quantization are quantized again. bnb_4bit_quant_storage (`torch.dtype` or str, *optional*, defaults to `torch.uint8`): This sets the storage type to pack the quanitzed 4-bit prarams. kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. """ def __init__( self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, bnb_4bit_quant_storage=None, **kwargs, ): self.quant_method = QuantizationMethod.BITS_AND_BYTES if load_in_4bit and load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") self._load_in_8bit = load_in_8bit self._load_in_4bit = load_in_4bit self.llm_int8_threshold = llm_int8_threshold self.llm_int8_skip_modules = llm_int8_skip_modules self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight self.bnb_4bit_quant_type = bnb_4bit_quant_type self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant if bnb_4bit_compute_dtype is None: self.bnb_4bit_compute_dtype = torch.float32 elif isinstance(bnb_4bit_compute_dtype, str): self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) elif isinstance(bnb_4bit_compute_dtype, torch.dtype): self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") if bnb_4bit_quant_storage is None: self.bnb_4bit_quant_storage = torch.uint8 elif isinstance(bnb_4bit_quant_storage, str): self.bnb_4bit_quant_storage = getattr(torch, bnb_4bit_quant_storage) elif isinstance(bnb_4bit_quant_storage, torch.dtype): self.bnb_4bit_quant_storage = bnb_4bit_quant_storage else: raise ValueError("bnb_4bit_quant_storage must be a string or a torch.dtype") if kwargs: logger.warning(f"Unused kwargs: {list(kwargs.keys())}. 
These kwargs are not used in {self.__class__}.") self.post_init() @property def load_in_4bit(self): return self._load_in_4bit @load_in_4bit.setter def load_in_4bit(self, value: bool): if not isinstance(value, bool): raise ValueError("load_in_4bit must be a boolean") if self.load_in_8bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") self._load_in_4bit = value @property def load_in_8bit(self): return self._load_in_8bit @load_in_8bit.setter def load_in_8bit(self, value: bool): if not isinstance(value, bool): raise ValueError("load_in_8bit must be a boolean") if self.load_in_4bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") self._load_in_8bit = value def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ if not isinstance(self.load_in_4bit, bool): raise ValueError("load_in_4bit must be a boolean") if not isinstance(self.load_in_8bit, bool): raise ValueError("load_in_8bit must be a boolean") if not isinstance(self.llm_int8_threshold, float): raise ValueError("llm_int8_threshold must be a float") if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): raise ValueError("llm_int8_skip_modules must be a list of strings") if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") if not isinstance(self.llm_int8_has_fp16_weight, bool): raise ValueError("llm_int8_has_fp16_weight must be a boolean") if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") if not isinstance(self.bnb_4bit_quant_type, str): raise ValueError("bnb_4bit_quant_type must be a string") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def is_quantizable(self): r""" Returns `True` if the model is quantizable, `False` otherwise. """ return self.load_in_8bit or self.load_in_4bit def quantization_method(self): r""" This method returns the quantization method used for the model. If the model is not quantizable, it returns `None`. """ if self.load_in_8bit: return "llm_int8" elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": return "fp4" elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": return "nf4" else: return None def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. 
""" output = copy.deepcopy(self.__dict__) output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] output["bnb_4bit_quant_storage"] = str(output["bnb_4bit_quant_storage"]).split(".")[1] output["load_in_4bit"] = self.load_in_4bit output["load_in_8bit"] = self.load_in_8bit return output def __repr__(self): config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = BitsAndBytesConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict class ExllamaVersion(int, Enum): ONE = 1 TWO = 2 @dataclass class GPTQConfig(QuantizationConfigMixin): """ This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `optimum` api for gptq quantization relying on auto_gptq backend. Args: bits (`int`): The number of bits to quantize to, supported numbers are (2, 3, 4, 8). tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*): The tokenizer used to process the dataset. You can pass either: - A custom tokenizer object. - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. dataset (`Union[List[str]]`, *optional*): The dataset used for quantization. You can provide your own dataset in a list of string or just use the original datasets used in GPTQ paper ['wikitext2','c4','c4-new','ptb','ptb-new'] group_size (`int`, *optional*, defaults to 128): The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. damp_percent (`float`, *optional*, defaults to 0.1): The percent of the average Hessian diagonal to use for dampening. Recommended value is 0.1. desc_act (`bool`, *optional*, defaults to `False`): Whether to quantize columns in order of decreasing activation size. Setting it to False can significantly speed up inference but the perplexity may become slightly worse. Also known as act-order. sym (`bool`, *optional*, defaults to `True`): Whether to use symetric quantization. true_sequential (`bool`, *optional*, defaults to `True`): Whether to perform sequential quantization even within a single Transformer block. Instead of quantizing the entire block at once, we perform layer-wise quantization. As a result, each layer undergoes quantization using inputs that have passed through the previously quantized layers. use_cuda_fp16 (`bool`, *optional*, defaults to `False`): Whether or not to use optimized cuda kernel for fp16 model. Need to have model in fp16. model_seqlen (`int`, *optional*): The maximum sequence length that the model can take. block_name_to_quantize (`str`, *optional*): The transformers block name to quantize. If None, we will infer the block name using common patterns (e.g. 
model.layers) module_name_preceding_first_block (`List[str]`, *optional*): The layers that are preceding the first Transformer block. batch_size (`int`, *optional*, defaults to 1): The batch size used when processing the dataset pad_token_id (`int`, *optional*): The pad token id. Needed to prepare the dataset when `batch_size` > 1. use_exllama (`bool`, *optional*): Whether to use exllama backend. Defaults to `True` if unset. Only works with `bits` = 4. max_input_length (`int`, *optional*): The maximum input length. This is needed to initialize a buffer that depends on the maximum expected input length. It is specific to the exllama backend with act-order. exllama_config (`Dict[str, Any]`, *optional*): The exllama config. You can specify the version of the exllama kernel through the `version` key. Defaults to `{"version": 1}` if unset. cache_block_outputs (`bool`, *optional*, defaults to `True`): Whether to cache block outputs to reuse as inputs for the succeeding block. modules_in_block_to_quantize (`List[List[str]]`, *optional*): List of list of module names to quantize in the specified block. This argument is useful to exclude certain linear modules from being quantized. The block to quantize can be specified by setting `block_name_to_quantize`. We will quantize each list sequentially. If not set, we will quantize all linear layers. Example: `modules_in_block_to_quantize =[["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"], ["self_attn.o_proj"]]`. In this example, we will first quantize the q,k,v layers simultaneously since they are independent. Then, we will quantize `self_attn.o_proj` layer with the q,k,v layers quantized. This way, we will get better results since it reflects the real input `self_attn.o_proj` will get when the model is quantized. 
""" def __init__( self, bits: int, tokenizer: Any = None, dataset: Optional[Union[List[str], str]] = None, group_size: int = 128, damp_percent: float = 0.1, desc_act: bool = False, sym: bool = True, true_sequential: bool = True, use_cuda_fp16: bool = False, model_seqlen: Optional[int] = None, block_name_to_quantize: Optional[str] = None, module_name_preceding_first_block: Optional[List[str]] = None, batch_size: int = 1, pad_token_id: Optional[int] = None, use_exllama: Optional[bool] = None, max_input_length: Optional[int] = None, exllama_config: Optional[Dict[str, Any]] = None, cache_block_outputs: bool = True, modules_in_block_to_quantize: Optional[List[List[str]]] = None, **kwargs, ): self.quant_method = QuantizationMethod.GPTQ self.bits = bits self.tokenizer = tokenizer self.dataset = dataset self.group_size = group_size self.damp_percent = damp_percent self.desc_act = desc_act self.sym = sym self.true_sequential = true_sequential self.use_cuda_fp16 = use_cuda_fp16 self.model_seqlen = model_seqlen self.block_name_to_quantize = block_name_to_quantize self.module_name_preceding_first_block = module_name_preceding_first_block self.batch_size = batch_size self.pad_token_id = pad_token_id self.use_exllama = use_exllama self.max_input_length = max_input_length self.exllama_config = exllama_config self.disable_exllama = kwargs.pop("disable_exllama", None) self.cache_block_outputs = cache_block_outputs self.modules_in_block_to_quantize = modules_in_block_to_quantize self.post_init() def get_loading_attributes(self): attibutes_dict = copy.deepcopy(self.__dict__) loading_attibutes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"] loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} return loading_attibutes_dict def post_init(self): r""" Safety checker that arguments are correct """ if self.bits not in [2, 3, 4, 8]: raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}") if self.group_size != -1 and self.group_size <= 0: raise ValueError("group_size must be greater than 0 or equal to -1") if not (0 < self.damp_percent < 1): raise ValueError("damp_percent must between 0 and 1.") if self.dataset is not None: if isinstance(self.dataset, str): if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: raise ValueError( f"""You have entered a string value for dataset. You can only choose between ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" ) elif not isinstance(self.dataset, list): raise ValueError( f"""dataset needs to be either a list of string or a value in ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" ) if self.disable_exllama is None and self.use_exllama is None: # New default behaviour self.use_exllama = True elif self.disable_exllama is not None and self.use_exllama is None: # Follow pattern of old config logger.warning( "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`." "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." ) self.use_exllama = not self.disable_exllama self.disable_exllama = None elif self.disable_exllama is not None and self.use_exllama is not None: # Only happens if user explicitly passes in both arguments raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. 
Please use just `use_exllama`") if self.exllama_config is None: self.exllama_config = {"version": ExllamaVersion.ONE} else: if "version" not in self.exllama_config: raise ValueError("`exllama_config` needs to have a `version` key.") elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: exllama_version = self.exllama_config["version"] raise ValueError( f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" ) if self.bits == 4 and self.use_exllama: if self.exllama_config["version"] == ExllamaVersion.ONE: logger.info( "You have activated exllama backend. Note that you can get better inference " "speed using exllamav2 kernel by setting `exllama_config`." ) elif self.exllama_config["version"] == ExllamaVersion.TWO: optimum_version = version.parse(importlib.metadata.version("optimum")) autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): raise ValueError( f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}" ) if self.modules_in_block_to_quantize is not None: optimum_version = version.parse(importlib.metadata.version("optimum")) if optimum_version < version.parse("1.15.0"): raise ValueError( "You current version of `optimum` does not support `modules_in_block_to_quantize` quantization argument, please upgrade `optimum` package to a version superior than 1.15.0 ." ) def to_dict(self): config_dict = super().to_dict() config_dict.pop("disable_exllama", None) return config_dict def to_dict_optimum(self): """ Get compatible dict for optimum gptq config """ quant_dict = self.to_dict() # make it compatible with optimum config quant_dict["disable_exllama"] = not self.use_exllama return quant_dict @classmethod def from_dict_optimum(cls, config_dict): """ Get compatible class with optimum gptq config dict """ if "disable_exllama" in config_dict: config_dict["use_exllama"] = not config_dict["disable_exllama"] # switch to None to not trigger the warning config_dict["disable_exllama"] = None config = cls(**config_dict) return config @dataclass class AwqConfig(QuantizationConfigMixin): """ This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `auto-awq` library awq quantization relying on auto_awq backend. Args: bits (`int`, *optional*, defaults to 4): The number of bits to quantize to. group_size (`int`, *optional*, defaults to 128): The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. zero_point (`bool`, *optional*, defaults to `True`): Whether to use zero point quantization. version (`AWQLinearVersion`, *optional*, defaults to `AWQLinearVersion.GEMM`): The version of the quantization algorithm to use. GEMM is better for big batch_size (e.g. >= 8) otherwise, GEMV is better (e.g. < 8 ). GEMM models are compatible with Exllama kernels. backend (`AwqBackendPackingMethod`, *optional*, defaults to `AwqBackendPackingMethod.AUTOAWQ`): The quantization backend. Some models might be quantized using `llm-awq` backend. This is useful for users that quantize their own models using `llm-awq` library. 
do_fuse (`bool`, *optional*, defaults to `False`): Whether to fuse attention and mlp layers together for faster inference fuse_max_seq_len (`int`, *optional*): The Maximum sequence length to generate when using fusing. modules_to_fuse (`dict`, *optional*, default to `None`): Overwrite the natively supported fusing scheme with the one specified by the users. modules_to_not_convert (`list`, *optional*, default to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). Note you cannot quantize directly with transformers, please refer to `AutoAWQ` documentation for quantizing HF models. exllama_config (`Dict[str, Any]`, *optional*): You can specify the version of the exllama kernel through the `version` key, the maximum sequence length through the `max_input_len` key, and the maximum batch size through the `max_batch_size` key. Defaults to `{"version": 2, "max_input_len": 2048, "max_batch_size": 8}` if unset. """ def __init__( self, bits: int = 4, group_size: int = 128, zero_point: bool = True, version: AWQLinearVersion = AWQLinearVersion.GEMM, backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ, do_fuse: Optional[bool] = None, fuse_max_seq_len: Optional[int] = None, modules_to_fuse: Optional[dict] = None, modules_to_not_convert: Optional[List] = None, exllama_config: Optional[Dict[str, int]] = None, **kwargs, ): self.quant_method = QuantizationMethod.AWQ self.bits = bits self.group_size = group_size self.zero_point = zero_point self.version = version self.backend = backend self.fuse_max_seq_len = fuse_max_seq_len self.modules_to_not_convert = modules_to_not_convert self.exllama_config = exllama_config self.modules_to_fuse = modules_to_fuse if do_fuse is None: self.do_fuse = modules_to_fuse is not None and len(modules_to_fuse) > 0 else: self.do_fuse = do_fuse self.fuse_max_seq_len = fuse_max_seq_len self.post_init() def post_init(self): r""" Safety checker that arguments are correct """ if not torch.cuda.is_available(): raise ValueError("AWQ is only available on GPU") if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]: raise ValueError( f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" ) self.version = AWQLinearVersion.from_str(self.version) if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA]: raise ValueError( f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV, AWQLinearVersion.EXLLAMA] - not recognized version {self.version}" ) if self.backend == AwqBackendPackingMethod.LLMAWQ: compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 8: raise ValueError("LLM-AWQ backend is only supported on GPUs with compute capability >= 8.0") if self.do_fuse and self.fuse_max_seq_len is None: raise ValueError( "You cannot enable fused modules without specifying a `fuse_max_seq_len`, make sure to pass a valid `fuse_max_seq_len` for your usecase" ) if self.do_fuse: awq_version_supports_fusing = False MIN_AWQ_VERSION = "0.1.7" if is_auto_awq_available(): awq_version_supports_fusing = version.parse(importlib.metadata.version("autoawq")) >= version.parse( MIN_AWQ_VERSION ) if not awq_version_supports_fusing: raise ValueError( f"You current version of `autoawq` does not support module 
fusing, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." ) if self.modules_to_not_convert is not None: awq_version_supports_non_conversion = False MIN_AWQ_VERSION = "0.1.8" if is_auto_awq_available(): awq_version_supports_non_conversion = version.parse( importlib.metadata.version("autoawq") ) >= version.parse(MIN_AWQ_VERSION) if not awq_version_supports_non_conversion: raise ValueError( f"You current version of `autoawq` does not support module quantization skipping, please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." ) if self.do_fuse and self.modules_to_fuse is not None: required_keys = [ "hidden_size", "num_attention_heads", "num_key_value_heads", "mlp", "attention", "layernorm", "use_alibi", ] if not all(key in self.modules_to_fuse for key in required_keys): raise ValueError( f"Required fields are missing in the fusing mapping, required fields are {required_keys}" ) if self.version == AWQLinearVersion.EXLLAMA: awq_version_supports_exllama = False MIN_AWQ_VERSION = "0.2.0" if is_auto_awq_available(): awq_version_supports_exllama = version.parse(importlib.metadata.version("autoawq")) >= version.parse( MIN_AWQ_VERSION ) if not awq_version_supports_exllama: raise ValueError( f"You current version of `autoawq` does not support exllama backend, " f"please upgrade `autoawq` package to at least {MIN_AWQ_VERSION}." ) if self.exllama_config is None: self.exllama_config = {"version": ExllamaVersion.TWO, "max_input_len": 2048, "max_batch_size": 8} else: if "version" not in self.exllama_config: raise ValueError("`exllama_config` needs to have a `version` key.") elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: exllama_version = self.exllama_config["version"] raise ValueError( f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" ) def get_loading_attributes(self): attibutes_dict = copy.deepcopy(self.__dict__) loading_attibutes = ["version", "do_fuse", "modules_to_fuse", "fuse_max_seq_len", "exllama_config"] loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} return loading_attibutes_dict @dataclass class AqlmConfig(QuantizationConfigMixin): """ This is a wrapper class about `aqlm` parameters. Args: in_group_size (`int`, *optional*, defaults to 8): The group size along the input dimension. out_group_size (`int`, *optional*, defaults to 1): The group size along the output dimension. It's recommended to always use 1. num_codebooks (`int`, *optional*, defaults to 1): Number of codebooks for the Additive Quantization procedure. nbits_per_codebook (`int`, *optional*, defaults to 16): Number of bits encoding a single codebook vector. Codebooks size is 2**nbits_per_codebook. linear_weights_not_to_quantize (`Optional[List[str]]`, *optional*): List of full paths of `nn.Linear` weight parameters that shall not be quantized. kwargs (`Dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. 
""" def __init__( self, in_group_size: int = 8, out_group_size: int = 1, num_codebooks: int = 1, nbits_per_codebook: int = 16, linear_weights_not_to_quantize: Optional[List[str]] = None, **kwargs, ): self.quant_method = QuantizationMethod.AQLM self.in_group_size = in_group_size self.out_group_size = out_group_size self.num_codebooks = num_codebooks self.nbits_per_codebook = nbits_per_codebook self.linear_weights_not_to_quantize = linear_weights_not_to_quantize self.post_init() def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ if not isinstance(self.in_group_size, int): raise ValueError("in_group_size must be a float") if not isinstance(self.out_group_size, int): raise ValueError("out_group_size must be a float") if not isinstance(self.num_codebooks, int): raise ValueError("num_codebooks must be a float") if not isinstance(self.nbits_per_codebook, int): raise ValueError("nbits_per_codebook must be a float") if self.linear_weights_not_to_quantize is not None and not isinstance( self.linear_weights_not_to_quantize, list ): raise ValueError("linear_weights_not_to_quantize must be a list of strings") if self.linear_weights_not_to_quantize is None: self.linear_weights_not_to_quantize = [] @dataclass class QuantoConfig(QuantizationConfigMixin): """ This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `quanto`. Args: weights (`str`, *optional*, defaults to `"int8"`): The target dtype for the weights after quantization. Supported values are ("float8","int8","int4","int2") activations (`str`, *optional*): The target dtype for the activations after quantization. Supported values are (None,"int8","float8") modules_to_not_convert (`list`, *optional*, default to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers). """ def __init__( self, weights="int8", activations=None, modules_to_not_convert: Optional[List] = None, **kwargs, ): self.quant_method = QuantizationMethod.QUANTO self.weights = weights self.activations = activations self.modules_to_not_convert = modules_to_not_convert self.post_init() def post_init(self): r""" Safety checker that arguments are correct """ accepted_weights = ["float8", "int8", "int4", "int2"] accepted_activations = [None, "int8", "float8"] if self.weights not in accepted_weights: raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}") if self.activations not in accepted_activations: raise ValueError(f"Only support weights in {accepted_activations} but found {self.activations}") @dataclass class EetqConfig(QuantizationConfigMixin): """ This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `eetq`. Args: weights (`str`, *optional*, defaults to `"int8"`): The target dtype for the weights. Supported value is only "int8" modules_to_not_convert (`list`, *optional*, default to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision. 
""" def __init__( self, weights: str = "int8", modules_to_not_convert: Optional[List] = None, **kwargs, ): self.quant_method = QuantizationMethod.EETQ self.weights = weights self.modules_to_not_convert = modules_to_not_convert self.post_init() def post_init(self): r""" Safety checker that arguments are correct """ accepted_weights = ["int8"] if self.weights not in accepted_weights: raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_pt_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class PyTorchBenchmark(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PyTorchBenchmarkArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Cache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DynamicCache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SinkCache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StaticCache(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithRefDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithSOPTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDatasetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstrainedBeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Constraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstraintListState(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DisjunctiveConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EosTokenCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EpsilonLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EtaLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ExponentialDecayLengthPenalty(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GenerationMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HammingDiversityLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InfNanRemoveLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitNormalization(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessorList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxLengthCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxTimeCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhrasalConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PrefixConstrainedLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SequenceBiasLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteriaList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StopStringCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TemperatureLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopKLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopPLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TypicalLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlbertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_albert(*args, **kwargs): requires_backends(load_tf_weights_in_albert, ["torch"]) ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlignModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
AlignPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class AltCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class ASTForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_XVECTOR_MAPPING = None MODEL_FOR_BACKBONE_MAPPING = None MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None MODEL_FOR_CAUSAL_LM_MAPPING = None MODEL_FOR_CTC_MAPPING = None MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_IMAGE_MAPPING = None MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None MODEL_FOR_KEYPOINT_DETECTION_MAPPING = None MODEL_FOR_MASK_GENERATION_MAPPING = None MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None MODEL_FOR_MASKED_LM_MAPPING = None MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None MODEL_FOR_OBJECT_DETECTION_MAPPING = None MODEL_FOR_PRETRAINING_MAPPING = None MODEL_FOR_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_TEXT_ENCODING_MAPPING = None MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = None MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None MODEL_FOR_VISION_2_SEQ_MAPPING = None MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None MODEL_MAPPING = None MODEL_WITH_LM_HEAD_MAPPING = None class AutoBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageToImage(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForKeypointDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class AutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextEncoding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextToSpectrogram(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextToWaveform(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVision2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVisualQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class AutoformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None class BarkCausalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkCoarseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkFineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkSemanticModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BART_PRETRAINED_MODEL_ARCHIVE_LIST = None class BartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class BartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartPretrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BeitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_bert(*args, **kwargs): requires_backends(load_tf_weights_in_bert, ["torch"]) class BertGenerationDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_bert_generation(*args, **kwargs): requires_backends(load_tf_weights_in_bert_generation, ["torch"]) BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_big_bird(*args, **kwargs): requires_backends(load_tf_weights_in_big_bird, ["torch"]) BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdPegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BioGptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
BioGptForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotSmallForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Blip2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2QFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None class BloomForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None class BridgeTowerForContrastiveLearning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BrosForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeEEForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeELForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CamembertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None class CanineForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CaninePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_canine(*args, **kwargs): requires_backends(load_tf_weights_in_canine, ["torch"]) CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ChineseCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClapAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapAudioModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapFeatureExtractor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ClapPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPSegForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClvpDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModelForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class CodeGenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CohereForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CohereModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CoherePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_convbert(*args, **kwargs): requires_backends(load_tf_weights_in_convbert, ["torch"]) CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextV2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2Model(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CpmAntForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None class CTRLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CvtModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CvtPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DbrxForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DbrxModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DbrxPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class DebertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class DebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DecisionTransformerGPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeiTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MCTCTForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTForClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ModalEmbeddings(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
RetriBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RetriBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrajectoryTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class AdaptiveEmbedding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_transfo_xl(*args, **kwargs): requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None class VanForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = None class DepthAnythingForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DepthAnythingPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetaForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DinatBackbone(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Dinov2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DistilBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class DonutSwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DonutSwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPRContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPTForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class EfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class EfficientNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class ElectraForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_electra(*args, **kwargs): requires_backends(load_tf_weights_in_electra, ["torch"]) ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
EncodecModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncodecPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErniePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieMForInformationExtraction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None class EsmFoldPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForProteinFolding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForSequenceClassification(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = None class FalconForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class FastSpeech2ConformerHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FastSpeech2ConformerWithHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlavaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class FlavaImageCodebook(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaImageModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaMultimodalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FocalNetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FSMTForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedFSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None class FunnelBaseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_funnel(*args, **kwargs): requires_backends(load_tf_weights_in_funnel, ["torch"]) class FuyuForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FuyuPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GemmaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GemmaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GemmaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GemmaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class GitForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None class GLPNForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2LMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt2(*args, **kwargs): requires_backends(load_tf_weights_in_gpt2, ["torch"]) GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTBigCodeForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt_neo(*args, **kwargs): requires_backends(load_tf_weights_in_gpt_neo, ["torch"]) GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
GPTNeoXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTJForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTSanJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTSanJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class GraphormerForGraphClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GraphormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GraphormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST = None class GroundingDinoForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroundingDinoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroundingDinoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class GroupViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
GroupViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroupViTTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroupViTVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class HubertForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HubertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HubertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HubertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class IBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None class IdeficsForVisionText2Text(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Idefics2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Idefics2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Idefics2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Idefics2Processor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ImageGPTForCausalImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class ImageGPTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_imagegpt(*args, **kwargs): requires_backends(load_tf_weights_in_imagegpt, ["torch"]) INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class InformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class InstructBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipQFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JambaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JambaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JambaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JambaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class JukeboxModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPrior(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxVQVAE(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Kosmos2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class LayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LED_PRETRAINED_MODEL_ARCHIVE_LIST = None class LEDForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LevitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
LevitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LiltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class LlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LlavaNextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlavaNextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class LongformerSelfAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None class LukeForEntityClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntityPairClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntitySpanClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
M2M100Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class M2M100PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MambaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MambaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MambaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Mask2FormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class MaskFormerForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerSwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
MBartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegatronBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None class MgpstrForSceneTextRecognition(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MgpstrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class MgpstrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MixtralPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilebert(*args, **kwargs): requires_backends(load_tf_weights_in_mobilebert, ["torch"]) MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV1ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v1(*args, **kwargs): 
requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"]) MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v2(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"]) MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class MPNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
MptForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class MusicgenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = None class MusicgenMelodyForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenMelodyForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenMelodyModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenMelodyPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class MvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class NatBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None class NezhaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None class NllbMoeForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeSparseMLP(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeTop2Router(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class NystromformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OlmoForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OlmoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OlmoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class OneFormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTModel(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_openai_gpt(*args, **kwargs): requires_backends(load_tf_weights_in_openai_gpt, ["torch"]) OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OPTForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Owlv2ForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OwlViTForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PatchTSMixerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForPretraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForRegression(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerForTimeSeriesClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSMixerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
        requires_backends(self, ["torch"])


PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PatchTSTForClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PatchTSTForPrediction(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PatchTSTForPretraining(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PatchTSTForRegression(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PatchTSTModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PatchTSTPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PegasusXForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusXModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PegasusXPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForImageClassificationFourier(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForImageClassificationLearned(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForMultimodalAutoencoding(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForOpticalFlow(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverLayer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PerceiverPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PersimmonForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PersimmonForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PersimmonModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PersimmonPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PhiForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PhiForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PhiForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PhiModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PhiPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = None


class Phi3ForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Phi3ForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Phi3ForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Phi3Model(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Phi3PreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class Pix2StructForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Pix2StructPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Pix2StructTextModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Pix2StructVisionModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PLBartForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PLBartForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PLBartForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PLBartModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PLBartPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PoolFormerForImageClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PoolFormerModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PoolFormerPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None


class Pop2PianoForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Pop2PianoPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None


class ProphetNetDecoder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ProphetNetEncoder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ProphetNetForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ProphetNetForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ProphetNetModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ProphetNetPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class PvtForImageClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtV2Backbone(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtV2ForImageClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtV2Model(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PvtV2PreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class QDQBertForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertForMultipleChoice(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertForNextSentencePrediction(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertLayer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertLMHeadModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class QDQBertPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def load_tf_weights_in_qdqbert(*args, **kwargs):
    requires_backends(load_tf_weights_in_qdqbert, ["torch"])


class Qwen2ForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2ForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2Model(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2PreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2MoeForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2MoeForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2MoeModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Qwen2MoePreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RagModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RagPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RagSequenceForGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RagTokenForGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None


class RealmEmbedder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmForOpenQA(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmKnowledgeAugEncoder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmReader(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmRetriever(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RealmScorer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def load_tf_weights_in_realm(*args, **kwargs):
    requires_backends(load_tf_weights_in_realm, ["torch"])


class RecurrentGemmaForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RecurrentGemmaModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RecurrentGemmaPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None


class ReformerAttention(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerLayer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerModelWithLMHead(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ReformerPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None


class RegNetForImageClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RegNetModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RegNetPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None


class RemBertForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertForMultipleChoice(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertLayer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RemBertPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def load_tf_weights_in_rembert(*args, **kwargs):
    requires_backends(load_tf_weights_in_rembert, ["torch"])


RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None


class ResNetBackbone(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ResNetForImageClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ResNetModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ResNetPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None


class RobertaForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaForMultipleChoice(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None


class RobertaPreLayerNormForCausalLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
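# Note on the pattern used throughout this module: every public torch-backed name is
# re-declared as a stub with the DummyObject metaclass and `_backends = ["torch"]`,
# and its `__init__` immediately calls `requires_backends(self, ["torch"])`. The
# `*_PRETRAINED_MODEL_ARCHIVE_LIST` constants are stubbed to None and the
# `load_tf_weights_in_*` helpers guard themselves the same way. As a result,
# `import transformers` still succeeds when PyTorch is not installed; only actually
# using one of these objects raises an ImportError naming the missing backend.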
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoCBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roc_bert(*args, **kwargs): requires_backends(load_tf_weights_in_roc_bert, ["torch"]) ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoFormerForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roformer(*args, **kwargs): requires_backends(load_tf_weights_in_roformer, ["torch"]) RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None class RwkvForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None class SamModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SamPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4TCodeHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4THifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TTextToUnitForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TTextToUnitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SegformerDecodeHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerModel(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class SegGptForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegGptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegGptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWDForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class SiglipForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SiglipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Speech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2ForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2PreTrainedModel(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5HifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SplinterForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class SqueezeBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModule(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StableLmForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StableLmForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StableLmModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StableLmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Starcoder2ForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Starcoder2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Starcoder2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Starcoder2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = None class SuperPointForKeypointDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuperPointPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swin2SRForImageSuperResolution(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swinv2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2ForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None


class SwitchTransformersEncoderModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SwitchTransformersForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SwitchTransformersModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SwitchTransformersPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SwitchTransformersSparseMLP(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SwitchTransformersTop1Router(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


T5_PRETRAINED_MODEL_ARCHIVE_LIST = None


class T5EncoderModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5ForConditionalGeneration(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5ForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5ForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5ForTokenClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5Model(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class T5PreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def load_tf_weights_in_t5(*args, **kwargs):
    requires_backends(load_tf_weights_in_t5, ["torch"])


TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None


class TableTransformerForObjectDetection(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TableTransformerModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TableTransformerPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None


class TapasForMaskedLM(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TapasForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TapasForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TapasModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TapasPreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def load_tf_weights_in_tapas(*args, **kwargs):
    requires_backends(load_tf_weights_in_tapas, ["torch"])
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimeSeriesTransformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimesformerForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimmBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrOCRForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TrOCRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvltForAudioVisualClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvpForVideoGrounding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = None class UdopEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UdopForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UdopModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UdopPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForQuestionAnswering(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class UnivNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class VideoMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
VideoMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViltForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForImagesAndTextClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class VipLlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VipLlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VisualBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForVisualReasoning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class ViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTHybridForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAELayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMSNForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitDetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitMatteForImageMatting(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitMattePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitsPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VivitForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2BertForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2BertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class WavLMForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForCTC(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None class WhisperForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class XCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XGLMForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class XLMWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaXLForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_xlnet(*args, **kwargs): requires_backends(load_tf_weights_in_xlnet, ["torch"]) XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None class XmodForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None class YolosForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None class YosoForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
YosoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Adafactor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AdamW(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def get_constant_schedule(*args, **kwargs): requires_backends(get_constant_schedule, ["torch"]) def get_constant_schedule_with_warmup(*args, **kwargs): requires_backends(get_constant_schedule_with_warmup, ["torch"]) def get_cosine_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_schedule_with_warmup, ["torch"]) def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) def get_inverse_sqrt_schedule(*args, **kwargs): requires_backends(get_inverse_sqrt_schedule, ["torch"]) def get_linear_schedule_with_warmup(*args, **kwargs): requires_backends(get_linear_schedule_with_warmup, ["torch"]) def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) def get_scheduler(*args, **kwargs): requires_backends(get_scheduler, ["torch"]) def get_wsd_schedule(*args, **kwargs): requires_backends(get_wsd_schedule, ["torch"]) class Conv1D(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def apply_chunking_to_forward(*args, **kwargs): requires_backends(apply_chunking_to_forward, ["torch"]) def prune_layer(*args, **kwargs): requires_backends(prune_layer, ["torch"]) class Trainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def torch_distributed_zero_first(*args, **kwargs): requires_backends(torch_distributed_zero_first, ["torch"]) class Seq2SeqTrainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"])
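All of the torch dummies above follow the same pattern: each public torch-backed symbol is replaced by a placeholder class whose `__init__` calls `requires_backends`, so `import transformers` succeeds without PyTorch but instantiating any torch model fails with a readable error. The snippet below is a minimal, self-contained sketch of how such a placeholder mechanism can work; it is an illustrative approximation, not the library's exact `DummyObject`/`requires_backends` implementation, and the names `DummyObjectSketch`, `requires_backends_sketch`, `_is_backend_available`, and `FakeModel` are invented for the example.

```python
import importlib.util


def _is_backend_available(name: str) -> bool:
    # Hypothetical helper: treat a backend as available if its module can be located.
    return importlib.util.find_spec(name) is not None


def requires_backends_sketch(obj, backends):
    # Simplified stand-in for a requires_backends-style check: raise a readable
    # ImportError listing the missing backends instead of failing deep inside an import.
    missing = [b for b in backends if not _is_backend_available(b)]
    if missing:
        name = obj if isinstance(obj, str) else obj.__class__.__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class DummyObjectSketch(type):
    # Metaclass for the placeholder classes: non-private attribute access re-checks
    # the required backends before anything else happens.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends_sketch(cls.__name__, cls._backends)
        return super().__getattribute__(key)


class FakeModel(metaclass=DummyObjectSketch):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["torch"])


# With torch missing, FakeModel() raises an ImportError that names the missing backend
# rather than an opaque ModuleNotFoundError from inside the library.
```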
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends SLOW_TO_FAST_CONVERTERS = None def convert_slow_tokenizer(*args, **kwargs): requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/versions.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with package versions """ import importlib.metadata import operator import re import sys from typing import Optional from packaging import version ops = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): if got_ver is None or want_ver is None: raise ValueError( f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" f" reinstalling {pkg}." ) if not ops[op](version.parse(got_ver), version.parse(want_ver)): raise ImportError( f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def require_version(requirement: str, hint: Optional[str] = None) -> None: """ Perform a runtime check of the dependency versions, using the exact same syntax used by pip. The installed module version comes from the *site-packages* dir via *importlib.metadata*. Args: requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" hint (`str`, *optional*): what suggestion to print in case of requirements not being met Example: ```python require_version("pandas>1.1.2") require_version("numpy>1.18.5", "this is important to have for whatever reason") ```""" hint = f"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$", requirement): pkg, op, want_ver = requirement, None, None else: match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f" got {requirement}" ) pkg, want_full = match[0] want_range = want_full.split(",") # there could be multiple requirements wanted = {} for w in want_range: match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f" but got {requirement}" ) op, want_ver = match[0] wanted[op] = want_ver if op not in ops: raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") # special case if pkg == "python": got_ver = ".".join([str(x) for x in sys.version_info[:3]]) for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) return # check if any version is installed try: got_ver = importlib.metadata.version(pkg) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) def require_version_core(requirement): """require_version wrapper which emits a core-specific hint on failure""" hint = "Try: `pip install transformers -U` or `pip install -e '.[dev]'` if you're working with git main" return require_version(requirement, hint)
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_torchaudio_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class MusicgenMelodyFeatureExtractor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"]) class MusicgenMelodyProcessor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/peft_utils.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from typing import Dict, Optional, Union from packaging import version from .hub import cached_file from .import_utils import is_peft_available ADAPTER_CONFIG_NAME = "adapter_config.json" ADAPTER_WEIGHTS_NAME = "adapter_model.bin" ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors" def find_adapter_config_file( model_id: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", _commit_hash: Optional[str] = None, ) -> Optional[str]: r""" Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path of the adapter config file if it is, None otherwise. Args: model_id (`str`): The identifier of the model to look for, can be either a local path or an id to the repository on the Hub. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. 
""" adapter_cached_filename = None if model_id is None: return None elif os.path.isdir(model_id): list_remote_files = os.listdir(model_id) if ADAPTER_CONFIG_NAME in list_remote_files: adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME) else: adapter_cached_filename = cached_file( model_id, ADAPTER_CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _commit_hash=_commit_hash, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) return adapter_cached_filename def check_peft_version(min_version: str) -> None: r""" Checks if the version of PEFT is compatible. Args: version (`str`): The version of PEFT to check against. """ if not is_peft_available(): raise ValueError("PEFT is not installed. Please install it with `pip install peft`") is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version) if not is_peft_version_compatible: raise ValueError( f"The version of PEFT you are using is not compatible, please use a version that is greater" f" than {min_version}" )
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_flax_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGenerationMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessorList(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopKLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopPLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, 
["flax"]) FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_MASKED_LM_MAPPING = None FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None FLAX_MODEL_FOR_PRETRAINING_MAPPING = None FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None FLAX_MODEL_MAPPING = None class FlaxAutoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["flax"]) class FlaxElectraForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2LMHeadModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLlamaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLlamaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLlamaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianMTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetModel(metaclass=DummyObject): _backends 
= ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForAudioClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["flax"]) class FlaxXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
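Since every class in this file is only a placeholder when JAX/Flax is absent, code that wants to degrade gracefully can test the backend up front instead of catching the ImportError raised by the dummies. A short sketch using the public availability helper; the fallback message is just an example and loading the checkpoint assumes network access:

```python
from transformers.utils import is_flax_available

if is_flax_available():
    # Only import and use the real Flax classes when the backend is installed.
    from transformers import FlaxBertModel

    model = FlaxBertModel.from_pretrained("bert-base-uncased")
else:
    print("Flax/JAX is not installed; fall back to another backend.")
```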
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/backbone_utils.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Collection of utils to be used by backbones and their components.""" import enum import inspect from typing import Iterable, List, Optional, Tuple, Union class BackboneType(enum.Enum): TIMM = "timm" TRANSFORMERS = "transformers" def verify_out_features_out_indices( out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]] ): """ Verify that out_indices and out_features are valid for the given stage_names. """ if stage_names is None: raise ValueError("Stage_names must be set for transformers backbones") if out_features is not None: if not isinstance(out_features, (list,)): raise ValueError(f"out_features must be a list got {type(out_features)}") if any(feat not in stage_names for feat in out_features): raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}") if len(out_features) != len(set(out_features)): raise ValueError(f"out_features must not contain any duplicates, got {out_features}") if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]): raise ValueError( f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}" ) if out_indices is not None: if not isinstance(out_indices, (list, tuple)): raise ValueError(f"out_indices must be a list or tuple, got {type(out_indices)}") # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,] positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices) if any(idx for idx in positive_indices if idx not in range(len(stage_names))): raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}") if len(positive_indices) != len(set(positive_indices)): msg = f"out_indices must not contain any duplicates, got {out_indices}" msg += f"(equivalent to {positive_indices}))" if positive_indices != out_indices else "" raise ValueError(msg) if positive_indices != tuple(sorted(positive_indices)): sorted_negative = tuple(idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])) raise ValueError( f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}" ) if out_features is not None and out_indices is not None: if len(out_features) != len(out_indices): raise ValueError("out_features and out_indices should have the same length if both are set") if out_features != [stage_names[idx] for idx in out_indices]: raise ValueError("out_features and out_indices should correspond to the same stages if both are set") def _align_output_features_output_indices( out_features: Optional[List[str]], out_indices: Optional[Union[List[int], Tuple[int]]], stage_names: List[str], ): """ Finds the corresponding `out_features` and `out_indices` for the given `stage_names`. 
The logic is as follows: - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the `out_indices`. - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the `out_features`. - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned. Args: out_features (`List[str]`): The names of the features for the backbone to output. out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output. stage_names (`List[str]`): The names of the stages of the backbone. """ if out_indices is None and out_features is None: out_indices = [len(stage_names) - 1] out_features = [stage_names[-1]] elif out_indices is None and out_features is not None: out_indices = [stage_names.index(layer) for layer in out_features] elif out_features is None and out_indices is not None: out_features = [stage_names[idx] for idx in out_indices] return out_features, out_indices def get_aligned_output_features_output_indices( out_features: Optional[List[str]], out_indices: Optional[Union[List[int], Tuple[int]]], stage_names: List[str], ) -> Tuple[List[str], List[int]]: """ Get the `out_features` and `out_indices` so that they are aligned. The logic is as follows: - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the `out_indices`. - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the `out_features`. - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. - `out_indices` and `out_features` set: they are verified to be aligned. Args: out_features (`List[str]`): The names of the features for the backbone to output. out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output. stage_names (`List[str]`): The names of the stages of the backbone. """ # First verify that the out_features and out_indices are valid verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names) output_features, output_indices = _align_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=stage_names ) # Verify that the aligned out_features and out_indices are valid verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names) return output_features, output_indices class BackboneMixin: backbone_type: Optional[BackboneType] = None def _init_timm_backbone(self, config) -> None: """ Initialize the backbone model from timm The backbone must already be loaded to self._backbone """ if getattr(self, "_backbone", None) is None: raise ValueError("self._backbone must be set before calling _init_timm_backbone") # These will diagree with the defaults for the transformers models e.g. 
for resnet50 # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] out_indices = self._backbone.feature_info.out_indices out_features = self._backbone.feature_info.module_name() # We verify the out indices and out features are valid verify_out_features_out_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self._out_features, self._out_indices = out_features, out_indices def _init_transformers_backbone(self, config) -> None: stage_names = getattr(config, "stage_names") out_features = getattr(config, "out_features", None) out_indices = getattr(config, "out_indices", None) self.stage_names = stage_names self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=stage_names ) # Number of channels for each stage. This is set in the transformer backbone model init self.num_features = None def _init_backbone(self, config) -> None: """ Method to initialize the backbone. This method is called by the constructor of the base class after the pretrained model weights have been loaded. """ self.config = config self.use_timm_backbone = getattr(config, "use_timm_backbone", False) self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS if self.backbone_type == BackboneType.TIMM: self._init_timm_backbone(config) elif self.backbone_type == BackboneType.TRANSFORMERS: self._init_transformers_backbone(config) else: raise ValueError(f"backbone_type {self.backbone_type} not supported.") @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): """ Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): """ Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) @property def out_feature_channels(self): # the current backbones will output the number of channels for each stage # even if that stage is not in the out_features list. 
return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} @property def channels(self): return [self.out_feature_channels[name] for name in self.out_features] def forward_with_filtered_kwargs(self, *args, **kwargs): signature = dict(inspect.signature(self.forward).parameters) filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature} return self(*args, **filtered_kwargs) def forward( self, pixel_values, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ): raise NotImplementedError("This method should be implemented by the derived class.") def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to include the `out_features` and `out_indices` attributes. """ output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output class BackboneConfigMixin: """ A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. """ @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): """ Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): """ Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to include the `out_features` and `out_indices` attributes. """ output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output def load_backbone(config): """ Loads the backbone model from a config object. If the config is from the backbone model itself, then we return a backbone model with randomly initialized weights. If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights if specified. """ from transformers import AutoBackbone, AutoConfig backbone_config = getattr(config, "backbone_config", None) use_timm_backbone = getattr(config, "use_timm_backbone", None) use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None) backbone_checkpoint = getattr(config, "backbone", None) backbone_kwargs = getattr(config, "backbone_kwargs", None) backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs if backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config? 
if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None: raise ValueError("Cannot specify both config.backbone_config and config.backbone") # If any of the following are set, then the config passed in is from a model which contains a backbone. if ( backbone_config is None and use_timm_backbone is None and backbone_checkpoint is None ): return AutoBackbone.from_config(config=config, **backbone_kwargs) # config from the parent model that has a backbone if use_timm_backbone: if backbone_checkpoint is None: raise ValueError("config.backbone must be set if use_timm_backbone is True") # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone # to determine whether to load the pretrained weights. backbone = AutoBackbone.from_pretrained( backbone_checkpoint, use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, **backbone_kwargs, ) elif use_pretrained_backbone: if backbone_checkpoint is None: raise ValueError("config.backbone must be set if use_pretrained_backbone is True") backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs) else: if backbone_config is None and backbone_checkpoint is None: raise ValueError("Either config.backbone_config or config.backbone must be set") if backbone_config is None: backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs) backbone = AutoBackbone.from_config(config=backbone_config) return backbone
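# --- Illustrative sketch, not part of the source file above ---
# The out_features / out_indices setters shown above both delegate to
# get_aligned_output_features_output_indices, so setting one attribute keeps the
# other in sync; load_backbone then decides whether the config it receives is a
# backbone's own config or a parent model's config. A minimal, standalone
# demonstration of both (stage names assumed to follow the ResNet-style naming
# mentioned in the comments earlier in this file; no pretrained weights are
# downloaded):
from transformers.utils.backbone_utils import (
    get_aligned_output_features_output_indices,
    load_backbone,
)

stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]

# Only indices supplied: the matching feature names are derived from stage_names.
features, indices = get_aligned_output_features_output_indices(
    out_features=None, out_indices=[2, 4], stage_names=stage_names
)
print(features, indices)  # e.g. ['stage2', 'stage4'] and (2, 4)

# Only feature names supplied: the matching indices are derived instead.
features, indices = get_aligned_output_features_output_indices(
    out_features=["stage3"], out_indices=None, stage_names=stage_names
)
print(features, indices)  # e.g. ['stage3'] and (3,)

# Passing a backbone config directly (no `backbone` / `backbone_config` fields set)
# hits the early-return branch of load_backbone and yields a randomly initialised
# backbone. ResNetConfig is used here purely as an example of a backbone config.
from transformers import ResNetConfig

config = ResNetConfig(out_features=["stage2", "stage4"])
backbone = load_backbone(config)
print(backbone.out_features, backbone.out_indices, backbone.channels)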
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_vision_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BlipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BridgeTowerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, 
["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GroundingDinoImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class IdeficsImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Idefics2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LlavaNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class NougatImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Owlv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Pix2StructImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PvtImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SamImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegGptImageProcessor(metaclass=DummyObject): _backends = 
["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SiglipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SuperPointImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvpImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VitMatteImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VivitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_keras_nlp_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TFGPT2Tokenizer(metaclass=DummyObject): _backends = ["keras_nlp"] def __init__(self, *args, **kwargs): requires_backends(self, ["keras_nlp"])
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/hp_naming.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import re class TrialShortNamer: PREFIX = "hp" DEFAULTS = {} NAMING_INFO = None @classmethod def set_defaults(cls, prefix, defaults): cls.PREFIX = prefix cls.DEFAULTS = defaults cls.build_naming_info() @staticmethod def shortname_for_word(info, word): if len(word) == 0: return "" short_word = None if any(char.isdigit() for char in word): raise Exception(f"Parameters should not contain numbers: '{word}' contains a number") if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(word) + 1): prefix = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: short_word = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(integer): s = "" while integer != 0: s = chr(ord("A") + integer % 10) + s integer //= 10 return s i = 0 while True: sword = word + "#" + int_to_alphabetic(i) if sword in info["reverse_short_word"]: continue else: short_word = sword break info["short_word"][word] = short_word info["reverse_short_word"][short_word] = word return short_word @staticmethod def shortname_for_key(info, param_name): words = param_name.split("_") shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name separators = ["", "_"] for separator in separators: shortname = separator.join(shortname_parts) if shortname not in info["reverse_short_param"]: info["short_param"][param_name] = shortname info["reverse_short_param"][shortname] = param_name return shortname return param_name @staticmethod def add_new_param_name(info, param_name): short_name = TrialShortNamer.shortname_for_key(info, param_name) info["short_param"][param_name] = short_name info["reverse_short_param"][short_name] = param_name @classmethod def build_naming_info(cls): if cls.NAMING_INFO is not None: return info = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } field_keys = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(info, k) cls.NAMING_INFO = info @classmethod def shortname(cls, params): cls.build_naming_info() assert cls.PREFIX is not None name = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"You should provide a default value for the param name {k} with value {v}") if v == cls.DEFAULTS[k]: # The default value is not added to the name continue key = cls.NAMING_INFO["short_param"][k] if isinstance(v, bool): v = 1 if v else 0 sep = "" if isinstance(v, (int, float)) else "-" e = f"{key}{sep}{v}" name.append(e) return "_".join(name) @classmethod def parse_repr(cls, repr): repr = repr[len(cls.PREFIX) + 1 :] if repr == "": values = [] else: values = repr.split("_") parameters = {} for value in values: if "-" in value: p_k, p_v = value.split("-") else: p_k = re.sub("[0-9.]", "", value) p_v = 
float(re.sub("[^0-9.]", "", value)) key = cls.NAMING_INFO["reverse_short_param"][p_k] parameters[key] = p_v for k in cls.DEFAULTS: if k not in parameters: parameters[k] = cls.DEFAULTS[k] return parameters
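# --- Illustrative sketch, not part of the source file above ---
# TrialShortNamer compresses a dict of hyperparameters into a short run name and
# can decode it again; values equal to the defaults are omitted from the name.
# The prefix, parameter names, and defaults below are made up for the example.
from transformers.utils.hp_naming import TrialShortNamer

TrialShortNamer.set_defaults("run", {"learning_rate": 3e-5, "warmup_steps": 0, "seed": 42})

name = TrialShortNamer.shortname({"learning_rate": 1e-4, "warmup_steps": 100, "seed": 42})
print(name)  # something like "run_lr0.0001_ws100" (seed matches the default, so it is dropped)

print(TrialShortNamer.parse_repr(name))
# something like {"learning_rate": 0.0001, "warmup_steps": 100.0, "seed": 42}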
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/logging.py
# coding=utf-8 # Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities.""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from logging import captureWarnings as _captureWarnings from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "detail": logging.DEBUG, # will also print filename and line number "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = not hf_hub_utils.are_progress_bars_disabled() def _get_default_logging_level(): """ If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to `_default_log_level` """ env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176 if sys.stderr is None: sys.stderr = open(os.devnull, "w") _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) # if logging level is debug, we add pathname and lineno to formatter for easy debugging if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail": formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s") _default_handler.setFormatter(formatter) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict(): return log_levels def captureWarnings(capture): """ Calls the `captureWarnings` method from the logging library to enable management of the warnings emitted by the `warnings` library. 
Read more about this method here: https://docs.python.org/3/library/logging.html#integration-with-the-warnings-module All warnings will be logged through the `py.warnings` logger. Careful: this method also adds a handler to this logger if it does not already have one, and updates the logging level of that logger to the library's root logger. """ logger = get_logger("py.warnings") if not logger.handlers: logger.addHandler(_default_handler) logger.setLevel(_get_library_root_logger().level) _captureWarnings(capture) def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Transformers's root logger as an int. Returns: `int`: The logging level. <Tip> 🤗 Transformers has following logging levels: - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL` - 40: `transformers.logging.ERROR` - 30: `transformers.logging.WARNING` or `transformers.logging.WARN` - 20: `transformers.logging.INFO` - 10: `transformers.logging.DEBUG` </Tip>""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Transformers's root logger. Args: verbosity (`int`): Logging level, e.g., one of: - `transformers.logging.CRITICAL` or `transformers.logging.FATAL` - `transformers.logging.ERROR` - `transformers.logging.WARNING` or `transformers.logging.WARN` - `transformers.logging.INFO` - `transformers.logging.DEBUG` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the verbosity to the `INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the verbosity to the `WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug(): """Set the verbosity to the `DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error(): """Set the verbosity to the `ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. 
""" _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed """ no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice @functools.lru_cache(None) def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) logging.Logger.warning_once = warning_once class EmptyTqdm: """Dummy tqdm which doesn't do anything.""" def __init__(self, *args, **kwargs): # pylint: disable=unused-argument self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): """Return empty function.""" def empty_fn(*args, **kwargs): # pylint: disable=unused-argument return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: """Return a boolean indicating whether tqdm progress bars are enabled.""" global _tqdm_active return bool(_tqdm_active) def enable_progress_bar(): """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True hf_hub_utils.enable_progress_bars() def disable_progress_bar(): """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False hf_hub_utils.disable_progress_bars()
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/doc.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Doc utilities: Utilities related to documentation """ import functools import re import types def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = f" The {class_name} forward method, overrides the `__call__` special method." note = r""" <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ fn.__doc__ = intro + note + docstring return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. 
""" output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) else: raise ValueError( f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has " "docstring and contain either `Args` or `Parameters`." ) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result FAKE_MODEL_DISCLAIMER = """ <Tip warning={true}> This example uses a random model as the real ones are all very big. To get proper results, you should use {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try adding `device_map="auto"` in the `from_pretrained` call. </Tip> """ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes {expected_output} >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) {expected_output} >>> # target is "nice puppet" >>> target_start_index = torch.tensor([{qa_target_start_index}]) >>> target_end_index = torch.tensor([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] {expected_output} >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = torch.tensor([1]) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` Example of multi-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained( ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" ... ) >>> labels = torch.sum( ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 ... ).to(torch.float) >>> loss = model(**inputs, labels=labels).loss ``` """ PT_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) {expected_loss} ``` """ PT_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label {expected_output} >>> # compute loss - target_label is e.g. "down" >>> target_label = model.config.id2label[0] >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_FRAME_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> probabilities = torch.sigmoid(logits[0]) >>> # labels is a one-hot array of shape (num_frames, num_speakers) >>> labels = (probabilities > 0.5).long() >>> labels[0].tolist() {expected_output} ``` """ PT_SPEECH_XVECTOR_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor( ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ... ) >>> with torch.no_grad(): ... embeddings = model(**inputs).embeddings >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() >>> # the resulting embeddings can be used for cosine similarity-based retrieval >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) >>> similarity = cosine_sim(embeddings[0], embeddings[1]) >>> threshold = 0.7 # the optimal threshold is dataset-dependent >>> if similarity < threshold: ... print("Speakers are not the same!") >>> round(similarity.item(), 2) {expected_output} ``` """ PT_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf" ... ) >>> logits = model(**inputs).logits >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()] >>> predicted_tokens_classes {expected_output} ``` ```python >>> labels = predicted_token_class_ids >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="tf") >>> outputs = model(**inputs) >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0]) >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0]) >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens) {expected_output} ``` ```python >>> # target is "nice puppet" >>> target_start_index = tf.constant([{qa_target_start_index}]) >>> target_end_index = tf.constant([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = tf.math.reduce_mean(outputs.loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] {expected_output} ``` ```python >>> # To train a model on `num_labels` 
classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = tf.constant(1) >>> loss = model(**inputs, labels=labels).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) {expected_loss} ``` """ TF_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import tensorflow as tf >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_ids = tf.math.argmax(logits, axis=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import tensorflow as tf >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> logits = model(**inputs).logits 
>>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logts for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def filter_outputs_from_example(docstring, **kwargs): """ Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`. """ for key, value in kwargs.items(): if value is not None: continue doc_key = "{" + key + "}" docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring) return docstring def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None, revision=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = { "model_class": model_class, "processor_class": processor_class, "checkpoint": checkpoint, "mask": mask, "qa_target_start_index": qa_target_start_index, "qa_target_end_index": qa_target_end_index, "expected_output": expected_output, "expected_loss": expected_loss, "real_checkpoint": real_checkpoint, "fake_checkpoint": checkpoint, "true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting. 
} if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") code_sample = filter_outputs_from_example( code_sample, expected_output=expected_output, expected_loss=expected_loss ) if real_checkpoint is not None: code_sample = FAKE_MODEL_DISCLAIMER + code_sample func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = code_sample.format(**doc_kwargs) if revision is not None: if re.match(r"^refs/pr/\\d+", revision): raise ValueError( f"The provided revision '{revision}' is incorrect. It should point to" " a pull request reference on the hub like 'refs/pr/6'" ) built_doc = built_doc.replace( f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")' ) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
mavonic_private_repos/transformers/src/transformers/utils/dummy_tensorflow_text_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TFBertTokenizer(metaclass=DummyObject): _backends = ["tensorflow_text"] def __init__(self, *args, **kwargs): requires_backends(self, ["tensorflow_text"])
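The point of this dummy module is that `from transformers import TFBertTokenizer` keeps working even when `tensorflow_text` is not installed; the ImportError is deferred until the class is actually used. Here is a simplified, self-contained stand-in for that pattern (the real `requires_backends` lives in `transformers.utils.import_utils` and probes backend availability rather than assuming the backend is missing):

```python
def requires_backends(obj, backends):
    # Simplified: the real helper checks each backend and only raises when one is
    # missing; here we assume "tensorflow_text" is absent to show the failure mode.
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class TFBertTokenizer:  # stand-in for the dummy class defined above
    _backends = ["tensorflow_text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["tensorflow_text"])


try:
    TFBertTokenizer()  # the import succeeded earlier; the failure only happens here
except ImportError as err:
    print(err)  # TFBertTokenizer requires the following backends: tensorflow_text
```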
mavonic_private_repos/transformers/src/transformers/utils/sentencepiece_model_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: sentencepiece_model.proto # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name="sentencepiece_model.proto", package="sentencepiece", syntax="proto2", serialized_options=b"H\003", create_key=_descriptor._internal_create_key, serialized_pb=( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01' b" \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02" b" \x01(\t\x12\x41\n\nmodel_type\x18\x03" b" \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04" b" \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12" b' \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n' b" \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b" b" \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12" b' \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r' b" \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e" b" \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f" b" \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12" b" \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10" b" \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11" b" \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14" b" \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15" b" \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17" b" \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16" b" \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18" b" \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19" b" \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e" b" \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$" b" \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18" b' \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18"' b" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18)" b" \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+" b" \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18." 
b" \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30" b" \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87" b" \x12+\n\x1ctrain_extremely_large_corpus\x18\x31" b' \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01' b" \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03" b" \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12" b" \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01' b' \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01' b" \x01(\t\x12\x10\n\x08\x65xpected\x18\x02" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01' b" \x03(\x0b\x32'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02" b" \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04" b" \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01" b" \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03" b' \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ), ) _TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor( name="ModelType", full_name="sentencepiece.TrainerSpec.ModelType", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="UNIGRAM", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BPE", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="WORD", index=2, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CHAR", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=1294, serialized_end=1347, ) _sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE) _MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor( name="Type", full_name="sentencepiece.ModelProto.SentencePiece.Type", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="NORMAL", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNKNOWN", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONTROL", index=2, number=3, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="USER_DEFINED", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BYTE", index=4, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNUSED", index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=2100, serialized_end=2184, ) _sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE) _TRAINERSPEC = _descriptor.Descriptor( name="TrainerSpec", full_name="sentencepiece.TrainerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.TrainerSpec.input", index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_format", full_name="sentencepiece.TrainerSpec.input_format", index=1, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_prefix", full_name="sentencepiece.TrainerSpec.model_prefix", index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_type", full_name="sentencepiece.TrainerSpec.model_type", index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocab_size", full_name="sentencepiece.TrainerSpec.vocab_size", index=4, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=8000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="accept_language", full_name="sentencepiece.TrainerSpec.accept_language", index=5, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_sample_size", full_name="sentencepiece.TrainerSpec.self_test_sample_size", index=6, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="character_coverage", full_name="sentencepiece.TrainerSpec.character_coverage", index=7, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9995), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_sentence_size", full_name="sentencepiece.TrainerSpec.input_sentence_size", index=8, number=11, type=4, cpp_type=4, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shuffle_input_sentence", full_name="sentencepiece.TrainerSpec.shuffle_input_sentence", index=9, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mining_sentence_size", full_name="sentencepiece.TrainerSpec.mining_sentence_size", index=10, number=12, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="training_sentence_size", full_name="sentencepiece.TrainerSpec.training_sentence_size", index=11, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="seed_sentencepiece_size", full_name="sentencepiece.TrainerSpec.seed_sentencepiece_size", index=12, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1000000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shrinking_factor", full_name="sentencepiece.TrainerSpec.shrinking_factor", index=13, number=15, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentence_length", full_name="sentencepiece.TrainerSpec.max_sentence_length", index=14, number=18, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4192, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_threads", full_name="sentencepiece.TrainerSpec.num_threads", index=15, number=16, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_sub_iterations", full_name="sentencepiece.TrainerSpec.num_sub_iterations", index=16, number=17, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentencepiece_length", full_name="sentencepiece.TrainerSpec.max_sentencepiece_length", index=17, number=20, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_unicode_script", full_name="sentencepiece.TrainerSpec.split_by_unicode_script", index=18, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_number", full_name="sentencepiece.TrainerSpec.split_by_number", index=19, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_whitespace", full_name="sentencepiece.TrainerSpec.split_by_whitespace", index=20, number=22, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="treat_whitespace_as_suffix", full_name="sentencepiece.TrainerSpec.treat_whitespace_as_suffix", index=21, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_digits", full_name="sentencepiece.TrainerSpec.split_digits", index=22, number=25, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="control_symbols", full_name="sentencepiece.TrainerSpec.control_symbols", index=23, number=30, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="user_defined_symbols", full_name="sentencepiece.TrainerSpec.user_defined_symbols", index=24, number=31, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="required_chars", full_name="sentencepiece.TrainerSpec.required_chars", index=25, number=36, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="byte_fallback", full_name="sentencepiece.TrainerSpec.byte_fallback", index=26, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocabulary_output_piece_score", full_name="sentencepiece.TrainerSpec.vocabulary_output_piece_score", index=27, number=32, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="hard_vocab_limit", full_name="sentencepiece.TrainerSpec.hard_vocab_limit", index=28, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="use_all_vocab", full_name="sentencepiece.TrainerSpec.use_all_vocab", index=29, number=34, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_id", full_name="sentencepiece.TrainerSpec.unk_id", index=30, number=40, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_id", full_name="sentencepiece.TrainerSpec.bos_id", index=31, number=41, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_id", full_name="sentencepiece.TrainerSpec.eos_id", index=32, number=42, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_id", full_name="sentencepiece.TrainerSpec.pad_id", index=33, number=43, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_piece", full_name="sentencepiece.TrainerSpec.unk_piece", index=34, number=45, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<unk>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_piece", full_name="sentencepiece.TrainerSpec.bos_piece", index=35, number=46, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_piece", full_name="sentencepiece.TrainerSpec.eos_piece", index=36, number=47, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"</s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_piece", full_name="sentencepiece.TrainerSpec.pad_piece", index=37, number=48, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<pad>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_surface", full_name="sentencepiece.TrainerSpec.unk_surface", index=38, number=44, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b" \342\201\207 ".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_extremely_large_corpus", full_name="sentencepiece.TrainerSpec.train_extremely_large_corpus", index=39, number=49, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _TRAINERSPEC_MODELTYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=45, serialized_end=1358, ) _NORMALIZERSPEC = _descriptor.Descriptor( name="NormalizerSpec", full_name="sentencepiece.NormalizerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", full_name="sentencepiece.NormalizerSpec.name", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precompiled_charsmap", full_name="sentencepiece.NormalizerSpec.precompiled_charsmap", index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, 
default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="add_dummy_prefix", full_name="sentencepiece.NormalizerSpec.add_dummy_prefix", index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="remove_extra_whitespaces", full_name="sentencepiece.NormalizerSpec.remove_extra_whitespaces", index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="escape_whitespaces", full_name="sentencepiece.NormalizerSpec.escape_whitespaces", index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalization_rule_tsv", full_name="sentencepiece.NormalizerSpec.normalization_rule_tsv", index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1361, serialized_end=1570, ) _SELFTESTDATA_SAMPLE = _descriptor.Descriptor( name="Sample", full_name="sentencepiece.SelfTestData.Sample", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.SelfTestData.Sample.input", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="expected", full_name="sentencepiece.SelfTestData.Sample.expected", index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto2", extension_ranges=[], oneofs=[], serialized_start=1641, serialized_end=1682, ) _SELFTESTDATA = _descriptor.Descriptor( name="SelfTestData", full_name="sentencepiece.SelfTestData", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="samples", full_name="sentencepiece.SelfTestData.samples", index=0, 
number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _SELFTESTDATA_SAMPLE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1572, serialized_end=1693, ) _MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor( name="SentencePiece", full_name="sentencepiece.ModelProto.SentencePiece", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="piece", full_name="sentencepiece.ModelProto.SentencePiece.piece", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", full_name="sentencepiece.ModelProto.SentencePiece.score", index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", full_name="sentencepiece.ModelProto.SentencePiece.type", index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _MODELPROTO_SENTENCEPIECE_TYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1985, serialized_end=2195, ) _MODELPROTO = _descriptor.Descriptor( name="ModelProto", full_name="sentencepiece.ModelProto", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="pieces", full_name="sentencepiece.ModelProto.pieces", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="trainer_spec", full_name="sentencepiece.ModelProto.trainer_spec", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalizer_spec", full_name="sentencepiece.ModelProto.normalizer_spec", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_data", 
full_name="sentencepiece.ModelProto.self_test_data", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="denormalizer_spec", full_name="sentencepiece.ModelProto.denormalizer_spec", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _MODELPROTO_SENTENCEPIECE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1696, serialized_end=2206, ) _TRAINERSPEC.fields_by_name["model_type"].enum_type = _TRAINERSPEC_MODELTYPE _TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC _SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA _SELFTESTDATA.fields_by_name["samples"].message_type = _SELFTESTDATA_SAMPLE _MODELPROTO_SENTENCEPIECE.fields_by_name["type"].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE _MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO _MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["pieces"].message_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["trainer_spec"].message_type = _TRAINERSPEC _MODELPROTO.fields_by_name["normalizer_spec"].message_type = _NORMALIZERSPEC _MODELPROTO.fields_by_name["self_test_data"].message_type = _SELFTESTDATA _MODELPROTO.fields_by_name["denormalizer_spec"].message_type = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["TrainerSpec"] = _TRAINERSPEC DESCRIPTOR.message_types_by_name["NormalizerSpec"] = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["SelfTestData"] = _SELFTESTDATA DESCRIPTOR.message_types_by_name["ModelProto"] = _MODELPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) TrainerSpec = _reflection.GeneratedProtocolMessageType( "TrainerSpec", (_message.Message,), { "DESCRIPTOR": _TRAINERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) }, ) _sym_db.RegisterMessage(TrainerSpec) NormalizerSpec = _reflection.GeneratedProtocolMessageType( "NormalizerSpec", (_message.Message,), { "DESCRIPTOR": _NORMALIZERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) }, ) _sym_db.RegisterMessage(NormalizerSpec) SelfTestData = _reflection.GeneratedProtocolMessageType( "SelfTestData", (_message.Message,), { "Sample": _reflection.GeneratedProtocolMessageType( "Sample", (_message.Message,), { "DESCRIPTOR": _SELFTESTDATA_SAMPLE, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) }, ), "DESCRIPTOR": _SELFTESTDATA, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) }, ) _sym_db.RegisterMessage(SelfTestData) _sym_db.RegisterMessage(SelfTestData.Sample) ModelProto = _reflection.GeneratedProtocolMessageType( "ModelProto", (_message.Message,), { "SentencePiece": _reflection.GeneratedProtocolMessageType( "SentencePiece", (_message.Message,), { "DESCRIPTOR": _MODELPROTO_SENTENCEPIECE, "__module__": "sentencepiece_model_pb2", # 
@@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) }, ), "DESCRIPTOR": _MODELPROTO, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) }, ) _sym_db.RegisterMessage(ModelProto) _sym_db.RegisterMessage(ModelProto.SentencePiece) DESCRIPTOR._options = None _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # @@protoc_insertion_point(module_scope)
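This generated module gives the library a protobuf view of a serialized SentencePiece model (used, for instance, when converting slow tokenizers) without going through the `sentencepiece` package itself. A hedged usage sketch follows; the `spiece.model` path is illustrative, and the old-style generated descriptors assume a protobuf 3.x runtime:

```python
from transformers.utils import sentencepiece_model_pb2 as sp_pb2

proto = sp_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # illustrative path to a SentencePiece model file
    proto.ParseFromString(f.read())

# Inspect a few of the fields declared by the descriptors above.
print(proto.trainer_spec.model_type)           # 1 == UNIGRAM per the ModelType enum
print(proto.normalizer_spec.add_dummy_prefix)
print(len(proto.pieces), "vocabulary pieces")
```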
mavonic_private_repos/transformers/src/transformers/utils/dummy_speech_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ASTFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"]) class Speech2TextFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"])
mavonic_private_repos/transformers/src/transformers/utils/__init__.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import get_full_repo_name # for backward compatibility from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility from packaging import version from .. import __version__ from .backbone_utils import BackboneConfigMixin, BackboneMixin from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushInProgress, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ACCELERATE_MIN_VERSION, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, XLA_FSDPV2_MIN_VERSION, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_aqlm_available, is_auto_awq_available, is_auto_gptq_available, is_av_available, is_bitsandbytes_available, is_bs4_available, is_coloredlogs_available, is_cv2_available, is_cython_available, is_datasets_available, is_decord_available, is_detectron2_available, is_eetq_available, is_essentia_available, is_faiss_available, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_flax_available, is_fsdp_available, is_ftfy_available, is_g2p_en_available, is_galore_torch_available, is_hqq_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jinja_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_levenshtein_available, is_librosa_available, is_mlx_available, is_natten_available, is_ninja_available, is_nltk_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, 
is_pretty_midi_available, is_protobuf_available, is_psutil_available, is_py3nvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_quanto_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_sudachi_projection_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tf2onnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bf16_available, is_torch_bf16_available_on_device, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fp16_available_on_device, is_torch_fx_available, is_torch_fx_proxy, is_torch_mlu_available, is_torch_mps_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_sdpa_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_tpu_available, is_torch_xla_available, is_torch_xpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchdynamo_compiling, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) from .peft_utils import ( ADAPTER_CONFIG_NAME, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, check_peft_version, find_adapter_config_file, ) WEIGHTS_NAME = "pytorch_model.bin" WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" TF2_WEIGHTS_NAME = "tf_model.h5" TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json" TF_WEIGHTS_NAME = "model.ckpt" FLAX_WEIGHTS_NAME = "flax_model.msgpack" FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json" SAFE_WEIGHTS_NAME = "model.safetensors" SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" CONFIG_NAME = "config.json" FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME PROCESSOR_NAME = "processor_config.json" GENERATION_CONFIG_NAME = "generation_config.json" MODEL_CARD_NAME = "modelcard.json" SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility MULTIPLE_CHOICE_DUMMY_INPUTS = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def check_min_version(min_version): if version.parse(__version__) < version.parse(min_version): if "dev" in min_version: error_message = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: error_message = f"This example requires a minimum version of {min_version}," error_message += f" but the version found is {__version__}.\n" raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." )
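`check_min_version` is what the bundled example scripts call at import time so they fail fast, with a pointer to matching example versions, when the installed library is too old. A minimal sketch; the version string is illustrative:

```python
from transformers.utils import check_min_version

# Raise an ImportError with an explanatory message if the installed
# transformers is older than the version this script was written against.
check_min_version("4.21.0")
```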
mavonic_private_repos/transformers/src/transformers/utils/constants.py
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
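These are the per-channel statistics that image processors fall back to when no `image_mean`/`image_std` is configured. A small sketch of the normalization they imply, using NumPy for illustration:

```python
import numpy as np

from transformers.utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

# Normalize an HxWx3 float image in [0, 1] channel by channel, the same way an
# image processor configured with the ImageNet defaults would.
image = np.random.rand(224, 224, 3).astype(np.float32)
normalized = (image - np.array(IMAGENET_DEFAULT_MEAN)) / np.array(IMAGENET_DEFAULT_STD)
print(normalized.mean(axis=(0, 1)))  # roughly centered around zero per channel
```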
mavonic_private_repos/transformers/src/transformers/utils/dummy_music_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class Pop2PianoFeatureExtractor(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"]) class Pop2PianoTokenizer(metaclass=DummyObject): _backends = ["music"] def __init__(self, *args, **kwargs): requires_backends(self, ["music"])
mavonic_private_repos/transformers/src/transformers/utils/dummy_tf_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TensorFlowBenchmarkArguments(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TensorFlowBenchmark(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGenerationMixin(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessorList(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopKLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopPLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class KerasMetricCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class PushToHubCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSequenceSummary(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSharedEmbeddings(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def shape_list(*args, **kwargs): requires_backends(shape_list, ["tf"]) TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
TFAlbertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_MASK_GENERATION_MAPPING = None TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None TF_MODEL_FOR_MASKED_LM_MAPPING = None TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None TF_MODEL_FOR_PRETRAINING_MAPPING = None TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_TEXT_ENCODING_MAPPING = None TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_MAPPING = None TF_MODEL_WITH_LM_HEAD_MAPPING = None class TFAutoModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForAudioClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTextEncoding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelWithLMHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartPretrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFBertEmbeddings(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["tf"]) class TFBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCamembertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFCamembertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCLIPModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFConvBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCTRLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCvtForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDebertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDeiTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) class TFDeiTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFAdaptiveEmbedding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDPRContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFEfficientFormerForImageClassification(metaclass=DummyObject): 
_backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFElectraForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFEsmForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFFlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) class TFFlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFFunnelBaseModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFGPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2LMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2MainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFGroupViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFGroupViTVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFHubertForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLongformerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFLongformerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerSelfAttention(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLxmertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianMTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMobileBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFMobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMobileViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMPNetForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFOPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagSequenceForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagTokenForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRegNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRemBertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFResNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRoFormerForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFRoFormerLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSamModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSamPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSegformerDecodeHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwiftFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwiftFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSwinForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFT5PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFTapasForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXGLMForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["tf"]) TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLMForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] 
def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class AdamWeightDecay(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class GradientAccumulator(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class WarmUp(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def create_optimizer(*args, **kwargs): requires_backends(create_optimizer, ["tf"])
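Every dummy class in the file above follows the same guard pattern: `DummyObject` is a metaclass and `requires_backends` raises an informative `ImportError` when the requested backend (here TensorFlow) is not installed, so calling a TF class without TensorFlow produces a clear message instead of an obscure failure. The following is a minimal, self-contained sketch of that pattern, not the actual transformers implementation; the class name `TFExampleModel` and the simplified backend check are illustrative assumptions.

import importlib.util


def requires_backends(obj, backends):
    # Simplified stand-in for transformers' requires_backends: raise a helpful
    # ImportError when an optional backend is missing.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    for backend in backends:
        module = "tensorflow" if backend == "tf" else backend
        if importlib.util.find_spec(module) is None:
            raise ImportError(f"{name} requires the '{backend}' backend, which is not installed.")


class DummyObject(type):
    # Simplified stand-in for the real metaclass: any non-private attribute
    # access on the class (e.g. TFExampleModel.from_pretrained) runs the check.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class TFExampleModel(metaclass=DummyObject):  # hypothetical dummy, mirroring the classes above
    _backends = ["tf"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["tf"])

Instantiating `TFExampleModel` without TensorFlow installed raises the `ImportError` immediately, which is the point of autogenerating this file: the public API surface stays importable even when optional backends are absent.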
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/bitsandbytes.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings


warnings.warn(
    "transformers.utils.bitsandbytes module is deprecated and will be removed in a future version. Please import bitsandbytes modules directly from transformers.integrations",
    FutureWarning,
)

from ..integrations import (  # noqa
    get_keys_to_not_convert,
    replace_8bit_linear,
    replace_with_bnb_linear,
    set_module_8bit_tensor_to_device,
    set_module_quantized_tensor_to_device,
)
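# --- Illustrative usage note (not part of the original module) ---
# A minimal sketch of the migration this shim asks for: the helpers re-exported above
# live in `transformers.integrations`, so new code should import them from there.
if __name__ == "__main__":
    # Deprecated path (triggers the FutureWarning above on first import):
    #     from transformers.utils.bitsandbytes import replace_with_bnb_linear
    # Preferred path:
    from transformers.integrations import get_keys_to_not_convert, replace_with_bnb_linear

    print(get_keys_to_not_convert.__module__, replace_with_bnb_linear.__module__)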
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/fx.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import builtins import collections import contextlib import functools import inspect import math import operator import os import random import sys import warnings from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union import torch import torch.utils._pytree as pytree from torch import nn from torch.fx import Graph, GraphModule, Node, Proxy, Tracer from torch.fx._compatibility import compatibility from torch.fx._symbolic_trace import is_fx_tracing from torch.fx.proxy import ParameterProxy from .. import logging from ..cache_utils import Cache, DynamicCache, SinkCache, StaticCache from ..modeling_utils import PretrainedConfig, PreTrainedModel from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..pytorch_utils import is_torch_greater_or_equal_than_2_0 from .import_utils import ( ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, get_torch_version, is_peft_available, is_torch_fx_available, ) if is_peft_available(): from peft import PeftModel logger = logging.get_logger(__name__) _IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES def _generate_supported_model_class_names( model_name: Type[PretrainedConfig], supported_tasks: Optional[Union[str, List[str]]] = None, ) -> List[str]: task_mapping = { "default": MODEL_MAPPING_NAMES, "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "masked-image-modeling": 
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, "ctc": MODEL_FOR_CTC_MAPPING_NAMES, "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES, "image-feature-extraction": MODEL_FOR_IMAGE_MAPPING_NAMES, } if supported_tasks is None: supported_tasks = task_mapping.keys() if isinstance(supported_tasks, str): supported_tasks = [supported_tasks] model_class_names = [] for task in supported_tasks: class_name = task_mapping[task].get(model_name, None) if class_name: model_class_names.append(class_name) return model_class_names _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [ "altclip", "albert", "bart", "bert", "blenderbot", "blenderbot-small", "bloom", "clip", "convnext", "deberta", "deberta-v2", "dinov2", "distilbert", "donut-swin", "electra", "gpt2", "gpt_neo", "gptj", "hubert", "layoutlm", "llama", "cohere", "lxmert", "m2m_100", "marian", "mbart", "megatron-bert", "mistral", "mixtral", "mobilebert", "mt5", "nezha", "opt", "pegasus", "plbart", "qwen2", "qwen2_moe", "resnet", "roberta", "segformer", "speech_to_text", "speech_to_text_2", "swin", "t5", "trocr", "vit", "xglm", "wav2vec2", # "xlnet", ] _FX_SUPPORTED_MODELS_WITH_KV_CACHE = ["llama", "opt"] _REGULAR_SUPPORTED_MODELS = [] for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS: if isinstance(item, dict): _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item)) else: _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item)) _SPECIAL_SUPPORTED_MODELS = [ "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", "AltCLIPTextModel", "AltCLIPVisionModel", "GitVisionModel", "GPT2DoubleHeadsModel", "Speech2Text2Decoder", "TrOCRDecoder", "PeftModelForCausalLM", "PeftModelForSeq2SeqLM", # TODO: add support for them as it should be quite easy to do so (small blocking issues). 
# XLNetForQuestionAnswering, ] _SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS))) _CURRENT_TRACER = None def torch_nn_embedding(self, input): return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype) def torch_nn_functional_embedding( input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False ): return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype) def torch_nn_layernorm(self, input): return input def torch_nn_groupnorm(self, input): return input def torch_nn_linear(self, input): return torch.empty(input.shape[:-1] + (self.out_features,), device="meta") def torch_relu(x): return x def torch_nn_relu(self, x): return x def torch_nn_functional_relu(x, inplace=False): if not inplace: raise ValueError("Don't support in-place functional.relu for MetaTensor analysis") return x def torch_where(condition, x, y): # torch.where returns the broadcasted tensor of condition, x, and y, # so hack it by using addition return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta") def torch_abs(input, *, out=None): if out is not None: raise ValueError("Don't support in-place abs for MetaTensor analysis") return input def torch_arange(*args, **kwargs): n = len(args) step = 1 if n == 1: start = 0 end = args[0] elif n == 2: start, end = args else: start, end, step = args if isinstance(start, float): start = int(start) if isinstance(end, float): start = int(end) if isinstance(step, float): step = int(step) step = kwargs.get("step", step) dtype = kwargs.get("dtype") return torch.empty((end - start) // step, dtype=dtype, device="meta") def torch_full(*args, **kwargs): args = list(args) # We set the fill value to 1 as its value is not important as long as it's not a tensor on the `meta` device. 
if len(args) > 1: args[1] = 1 else: kwargs["fill_value"] = 1 kwargs_without_device = dict(kwargs) kwargs_without_device.pop("device", None) return torch.full(*args, **kwargs_without_device, device="meta") def torch_cat(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + dim shapes = [t.shape for t in tensors] shape = list(shapes[0]) concatenated_dim = sum(shape[dim] for shape in shapes) final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :] return torch.empty(final_shape, device="meta") def torch_stack(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + 1 + dim shape = list(tensors[0].shape) shape.insert(dim, len(tensors)) return torch.empty(shape, device="meta") def torch_add(input, other, *, alpha=1, out=None): if not isinstance(input, torch.Tensor): return torch.empty_like(other, device="meta") if not isinstance(other, torch.Tensor): return torch.empty_like(input, device="meta") max_length = max(input.dim(), other.dim()) input_shape = list(input.shape) + [1] * (max_length - input.dim()) other_shape = list(other.shape) + [1] * (max_length - other.dim()) shape = [] for i in range(max_length): shape.append(max(input_shape[i], other_shape[i])) return torch.empty(shape, device="meta") def torch_mul(input, other, *, out=None): return torch_add(input, other, out=out) def torch_tensor_mul(self, other): return torch_mul(self, other) def torch_matmul(input, other, *, out=None): d1 = input.dim() d2 = other.dim() shape = None if d1 == 1 and d2 == 1: shape = None elif d1 == 2 and d2 == 2: shape = (input.size(0), other.size(1)) elif d1 == 1 and d2 == 2: shape = (other.size(1),) elif d1 == 2 and d1 == 1: shape = (input.size(0),) else: max_length = max(input.dim(), other.dim()) shape1 = list(input.shape) shape2 = list(other.shape) if d1 == 1: shape1 = [1] + shape1 if d2 == 1: shape2.append(1) shape1 = [-1] * (max_length - d1) + list(input.shape) shape2 = [-1] * (max_length - d2) + list(other.shape) shape = [] for i in range(max_length): shape.append(max(shape1[i], shape2[i])) shape[-2] = shape1[-2] shape[-1] = shape2[-1] if d1 == 1: shape.pop(-2) if d2 == 1: shape.pop(-1) if shape is None: return torch.tensor(0.0, device="meta") return torch.empty(*shape, device="meta") def torch_bmm(input, mat2, *, out=None): if out is not None: raise ValueError("Don't support in-place bmm for MetaTensor analysis") batch_size, n, m = input.shape _, _, p = mat2.shape return torch.empty(batch_size, n, p, device="meta") def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None): if out is not None: raise ValueError("Don't support in-place baddbmm for MetaTensor analysis") return torch_bmm(batch1, batch2) def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None): return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out) def torch_einsum(equation, *operands): # TODO: infer shape without performing the computation, this might be quite hard. 
concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands) return torch.einsum(equation, *concrete_operands).to("meta") def torch_tensor_repeat(self, *sizes): shape = list(self.shape) for i, x in enumerate(sizes): shape[i] *= x return torch.empty(shape, device="meta") def torch_repeat_interleave(*args, dim=None, output_size=None): num_args = len(args) if num_args == 1: shape = [output_size if output_size is not None else args[0].sum()] else: shape = list(args[0].shape) if dim is None: if num_args > 2: dim = args[2] else: shape = [sum(shape)] dim = 0 repeats = args[1] if isinstance(repeats, int) or torch.numel(repeats) == 1: shape[dim] *= int(repeats) else: shape[dim] = output_size if output_size is not None else repeats.sum() return torch.empty(*shape, device="meta") def torch_index_select(input, dim, index, *, out=None): shape = list(input.shape) shape[dim] = len(index) return torch.empty(*shape, device="meta") def torch_tensor_index_select(self, dim, index): return torch_index_select(self, dim, index) def torch_gather(input, dim, index, *, sparse_grad=False, out=None): shape = list(input.shape) shape[dim] = index.shape[dim] return torch.empty(*shape, device="meta") def torch_tensor_gather(self, dim, index): return torch_gather(self, dim, index) def torch_roll(input, shifts, dims=None): return input def torch_flip(input, dims): return input def torch_tensor_flip(self, dims): return self def torch_nn_conv1d(self, input): l_in = input.shape[-1] shape = None padding = self.padding if padding == "valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) l_out = math.floor( (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) shape[-1] = l_out shape[-2] = self.out_channels return torch.empty(shape, device="meta") def torch_nn_conv2d(self, input): h_in, w_in = input.shape[-2:] shape = None padding = self.padding if padding == "valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) h_out = math.floor( (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) w_out = math.floor( (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1 ) shape[-2:] = [h_out, w_out] shape[-3] = self.out_channels return torch.empty(shape, device="meta") def torch_squeeze(input, dim=None): shape = list(input.shape) if dim is not None: if dim < 0: dim = input.dim() + dim if shape[dim] == 1: shape.pop(dim) else: new_shape = [] for dim_value in shape: if dim_value == 1: continue new_shape.append(dim_value) shape = new_shape return torch.empty(shape, device="meta") def torch_tensor_squeeze(self, dim=None): return torch_squeeze(self, dim) def torch_unsqueeze(input, dim): shape = list(input.shape) if dim < 0: dim = input.dim() + 1 + dim shape.insert(dim, 1) return torch.empty(shape, device="meta") def torch_tensor_unsqueeze(self, dim): return torch_unsqueeze(self, dim) def torch_unique_consecutive(input, **kwargs): output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs) if isinstance(output, torch.Tensor): return output.to("meta") else: return tuple(map(output, lambda x: x.to("meta"))) def torch_nn_functional_one_hot(tensor, num_classes=-1): if num_classes < 0: raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis") shape = list(tensor.shape) + [num_classes] return torch.empty(shape, 
device="meta") def torch_nn_functional_scaled_dot_product_attention( query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None ): target_length = query.shape[-2] head_dim = value.shape[-1] return torch.empty((*query.shape[:-2], target_length, head_dim), device="meta") def torch_nn_mseloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_crossentropyloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_bcewithlogitsloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def operator_getitem(a, b): def to_concrete(t): if isinstance(t, torch.Tensor): concrete = torch.ones_like(t, device="cpu") if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]: concrete = concrete.to(torch.int64) return concrete return t if isinstance(a, torch.Tensor): # TODO: infer shape without performing the computation. if isinstance(b, tuple): b = tuple(map(to_concrete, b)) else: b = to_concrete(b) return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta") return operator.getitem(a, b) _MANUAL_META_OVERRIDES: Dict[Callable, Callable] = { torch.nn.Embedding: torch_nn_embedding, torch.nn.functional.embedding: torch_nn_functional_embedding, torch.nn.LayerNorm: torch_nn_layernorm, torch.nn.GroupNorm: torch_nn_groupnorm, torch.nn.Linear: torch_nn_linear, torch.relu: torch_relu, torch.nn.functional.relu: torch_nn_functional_relu, torch.nn.ReLU: torch_nn_relu, torch.where: torch_where, torch.abs: torch_abs, torch.arange: torch_arange, torch.full: torch_full, torch.cat: torch_cat, torch.stack: torch_stack, torch.add: torch_add, torch.mul: torch_mul, torch.Tensor.mul: torch_tensor_mul, torch.matmul: torch_matmul, torch.bmm: torch_bmm, torch.baddbmm: torch_baddbmm, torch.Tensor.baddbmm: torch_tensor_baddbmm, torch.einsum: torch_einsum, torch.Tensor.repeat: torch_tensor_repeat, torch.repeat_interleave: torch_repeat_interleave, torch.roll: torch_roll, torch.flip: torch_flip, torch.Tensor.flip: torch_tensor_flip, torch.index_select: torch_index_select, torch.Tensor.index_select: torch_tensor_index_select, torch.gather: torch_gather, torch.Tensor.gather: torch_tensor_gather, torch.nn.Conv1d: torch_nn_conv1d, torch.nn.Conv2d: torch_nn_conv2d, torch.squeeze: torch_squeeze, torch.Tensor.squeeze: torch_tensor_squeeze, torch.unsqueeze: torch_unsqueeze, torch.Tensor.unsqueeze: torch_tensor_unsqueeze, torch.unique_consecutive: torch_unique_consecutive, torch.nn.functional.one_hot: torch_nn_functional_one_hot, torch.nn.MSELoss: torch_nn_mseloss, torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss, torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss, operator.getitem: operator_getitem, } if is_torch_greater_or_equal_than_2_0: _MANUAL_META_OVERRIDES[ torch.nn.functional.scaled_dot_product_attention ] = torch_nn_functional_scaled_dot_product_attention class HFProxy(Proxy): """ Proxy that uses metadata to handle data-dependent control-flow. """ def install_metadata(self, metadata): self._metadata = metadata @property def shape(self): return self.tracer.create_proxy("call_method", "size", (self,), {}) @property def device(self): # Hack so we can track when devices are used. 
During meta-tensor propagation, # replace these values with a constant 'meta' return MetaDeviceAttribute(self, "device") def __len__(self): if hasattr(self, "_metadata") and self._metadata is not None: return len(self._metadata) return super().__len__() def __bool__(self): if hasattr(self, "_metadata") and self._metadata is not None: return self._metadata return super().__bool__() def __getattr__(self, k): if k == "_metadata": return self.__getattribute__(k) # note: not added to the graph yet, if this is a method call # we peephole optimize to the method invocation return HFAttribute(self, k) def __setitem__(self, indices, values): return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {}) def __contains__(self, key): if hasattr(self, "_metadata") and self._metadata is not None: return key in self._metadata return super().__contains__(key) class HFAttribute(HFProxy): def __init__(self, root, attr: str): self.root = root self.attr = attr self.tracer = root.tracer self._node = None if hasattr(self.root, "_metadata"): self.install_metadata(getattr(self.root._metadata, attr)) @property def node(self): # the node for attributes is added lazily, since most will just be method calls # which do not rely on the getitem call if self._node is None: self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node return self._node def __call__(self, *args, **kwargs): return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) class MetaDeviceAttribute(HFAttribute): pass class HFCacheProxy(HFProxy): """ Proxy that represents an instance of `transformers.cache_utils.Cache`. """ def install_orig_cache_cls(self, orig_cache_cls: Type[Cache]): self._orig_cache_cls = orig_cache_cls @property def __class__(self): if not hasattr(self, "_orig_cache_cls"): raise RuntimeError("The original Cache class must be installed to the HFCacheProxy.") return self.tracer._CLASSES_TO_PATCH[self._orig_cache_cls] def create_wrapper( function: Callable, op_type: Union[Literal["call_function"], Literal["call_method"], Literal["get_attr"]], proxy_factory_fn: Optional[Callable[[Node], Proxy]] = None, ) -> Callable: @functools.wraps(function) def wrapper(*args, **kwargs): if not is_fx_tracing(): return function(*args, **kwargs) found_proxies = [] def check_proxy(a): if isinstance(a, Proxy): found_proxies.append(a) torch.fx.node.map_aggregate(args, check_proxy) torch.fx.node.map_aggregate(kwargs, check_proxy) if len(found_proxies) > 0: tracer = found_proxies[0].tracer if op_type == "call_function": target = function elif op_type == "call_method": target = function.__name__ elif op_type == "get_attr": target = function.__name__ else: raise ValueError(f"op_type {op_type} not supported.") return tracer.create_proxy(op_type, target, args, kwargs, proxy_factory_fn=proxy_factory_fn) else: return function(*args, **kwargs) return wrapper class HFProxyableClassMeta(type): """ Metaclass that creates a class with its main methods wrapped to be proxyable. 
""" def __new__( cls, name: str, bases: Tuple[Type, ...], attrs: Dict[str, Any], proxy_factory_fn: Optional[Callable[[Node], Proxy]] = None, ): cls = super().__new__(cls, name, bases, attrs) for attr_name in dir(cls): attr = getattr(cls, attr_name, None) if attr is None: continue if attr_name == "__init__": op_type = "call_function" elif attr_name.startswith("__"): op_type = None elif inspect.ismethod(attr): op_type = "call_function" elif inspect.isfunction(attr): op_type = "call_method" else: op_type = None if op_type is not None: setattr(cls, attr_name, create_wrapper(attr, op_type, proxy_factory_fn=proxy_factory_fn)) return cls def gen_constructor_wrapper(target: Callable) -> Tuple[Callable, Callable]: """ Wraps `target` to be proxyable. Used for tensor creators like `torch.ones`, `torch.arange` and so on. """ wrapper = create_wrapper(target, "call_function") return wrapper, target def _proxies_to_metas(v): """Returns the underlying metadata for HFProxies, and behaves like the identity for the others.""" if isinstance(v, MetaDeviceAttribute): return "meta" if isinstance(v, torch.fx.Proxy): if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): raise RuntimeError(f"No metadata was found for {v}") return v._metadata return v def create_cache_proxy_factory_fn(orig_cache_cls: Type[Cache]) -> Callable[[Node], HFCacheProxy]: def cache_proxy_factory_fn(n: Node) -> HFCacheProxy: global _CURRENT_TRACER if not isinstance(_CURRENT_TRACER, HFTracer): raise RuntimeError("Cannot create HFCacheProxy because there is no HFTracer currently tracing.") cache_proxy = HFCacheProxy(n, _CURRENT_TRACER) cache_proxy.install_orig_cache_cls(orig_cache_cls) return cache_proxy return cache_proxy_factory_fn # Proxyable equivalent of the cache classes defined in `transformers.cache_utils`. ProxyableCache = HFProxyableClassMeta( "ProxyableCache", (Cache,), {}, proxy_factory_fn=create_cache_proxy_factory_fn(Cache) ) ProxyableDynamicCache = HFProxyableClassMeta( "ProxyableDynamicCache", (DynamicCache,), {}, proxy_factory_fn=create_cache_proxy_factory_fn(DynamicCache), ) ProxyableSinkCache = HFProxyableClassMeta( "ProxyableSinkCache", (SinkCache,), {}, proxy_factory_fn=create_cache_proxy_factory_fn(SinkCache), ) ProxyableStaticCache = HFProxyableClassMeta( "ProxyableStaticCache", (StaticCache,), {}, proxy_factory_fn=create_cache_proxy_factory_fn(StaticCache), ) def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): if forbidden_values is None: forbidden_values = [] value = random.randint(low, high) while value in forbidden_values: value = random.randint(low, high) return value class HFTracer(Tracer): """ Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the regular PyTorch torch.fx.Proxy. 
""" # Feature flag for proxying accesses to buffer values proxy_buffer_attributes: bool = True allow_insert_stateless_mods: bool = True _TORCH_METHODS_TO_PATCH = [ "arange", "zeros", "ones", "full", "full_like", "eye", "empty", "tensor", "clamp", "finfo", "tril", ] _CLASSES_TO_PATCH = { Cache: ProxyableCache, DynamicCache: ProxyableDynamicCache, SinkCache: ProxyableSinkCache, StaticCache: ProxyableStaticCache, } supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) def __init__(self, autowrap_modules=(math,), autowrap_functions=()): super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) if not is_torch_fx_available(): raise ImportError( f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version " f"{TORCH_FX_REQUIRED_VERSION} is supported." ) def _generate_dummy_input( self, model: "PreTrainedModel", input_name: str, shape: List[int], input_names: List[str] ) -> Dict[str, torch.Tensor]: """Generates dummy input for model inference recording.""" # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored # from pickle, or from the "__class__" attribute in the general case. model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__ device = model.device inputs_dict = {} # when tracing a model with KV cache, we simply need to unsure that the KV cache length is larger than one to # rightfully pass certain controlflows (Example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162). # After tracing, the model can then still be used with arbitrary lengths different than the one used during tracing. kv_cache_length = 5 if input_name in ["labels", "start_positions", "end_positions"]: batch_size = shape[0] if model_class_name in [ *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): if not hasattr(model.config, "problem_type") or model.config.problem_type is None: raise ValueError( "Could not retrieve the problem type for the sequence classification task, please set " 'model.config.problem_type to one of the following values: "regression", ' '"single_label_classification", or "multi_label_classification".' 
) if model.config.problem_type == "regression": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 elif model.config.problem_type == "single_label_classification": labels_shape = (batch_size,) labels_dtype = torch.long elif model.config.problem_type == "multi_label_classification": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 else: raise ValueError( 'Expected model.config.problem_type to be either: "regression", "single_label_classification"' f', or "multi_label_classification", but "{model.config.problem_type}" was provided.' ) inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) elif model_class_name in [ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), "GPT2DoubleHeadsModel", "PeftModelForCausalLM", "PeftModelForSeq2SeqLM", ]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device) else: raise NotImplementedError( f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." ) elif "pixel_values" in input_name: batch_size = shape[0] image_size = getattr(model.config, "image_size", None) if image_size is None: if hasattr(model.config, "vision_config"): image_size = model.config.vision_config.image_size elif hasattr(model.config, "encoder"): image_size = model.config.encoder.image_size else: image_size = (_generate_random_int(), _generate_random_int()) # If no num_channels is in the config, use some arbitrary value. num_channels = getattr(model.config, "num_channels", 3) if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) height, width = image_size inputs_dict[input_name] = torch.zeros( batch_size, num_channels, height, width, dtype=torch.float32, device=device ) elif "bbox" in input_name: inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) elif "input_features" in input_name: inputs_dict[input_name] = torch.zeros( *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device ) elif "visual_feats" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_feat_dim, ], dtype=torch.float, device=device, ) elif "visual_pos" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_pos_dim, ], dtype=torch.float, device=device, ) elif "inputs" in input_name: inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) elif "input_values" in input_name: batch_size, _ = shape # Generating big sequence length for audio inputs. 
seq_length = _generate_random_int(low=10000, high=20000) inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) elif "mask" in input_name: if "past_key_values" in input_names: mask_shape = [shape[0], shape[1] + kv_cache_length] else: mask_shape = shape inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device) elif "ids" in input_name: inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) elif "past_key_values" in input_name: if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: raise NotImplementedError( f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added." ) num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads cache_shape = (shape[0], num_heads, kv_cache_length, head_dim) pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=device), torch.rand(cache_shape, dtype=torch.float, device=device), ) for i in range(model.config.num_hidden_layers) ) inputs_dict[input_name] = pkv else: shape_with_hidden_size = shape + [model.config.hidden_size] inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) return inputs_dict def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) if kind == "placeholder" and target in self.meta_args: rv.install_metadata(self.meta_args[target]) return rv if target in self.orig_fns: # NOTE: tensor constructors in PyTorch define the `device` argument as # *kwargs-only*. That is why this works. If you add methods to # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, # this will break and you will likely see issues where we cannot infer # the size of the output. 
if "device" in kwargs: kwargs["device"] = "meta" try: args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) should_install_metadata = True self._disable_module_getattr = True self._disable_call_module = True if kind == "call_function": meta_target = _MANUAL_META_OVERRIDES.get(target, target) meta_out = meta_target(*args_metas, **kwargs_metas) if isinstance(meta_out, torch.Tensor): meta_out = meta_out.to(device="meta") elif kind == "call_method": method = getattr(args_metas[0].__class__, target) meta_target = _MANUAL_META_OVERRIDES.get(method, method) meta_out = meta_target(*args_metas, **kwargs_metas) elif kind == "call_module": if not hasattr(self, "orig_forward"): raise AttributeError(f"{self} does not have an attribute called orig_forward") mod = self.root.get_submodule(target) mod_type = type(mod) if mod_type in _MANUAL_META_OVERRIDES: meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) else: meta_out = self.orig_forward(*args_metas, **kwargs_metas) elif kind == "get_attr": attr_itr = self.root atoms = target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) if isinstance(attr_itr, torch.Tensor): meta_out = attr_itr.to(device="meta") else: meta_out = attr_itr else: should_install_metadata = False if should_install_metadata: if not isinstance(rv, Proxy): raise ValueError("Don't support composite output yet") rv.install_metadata(meta_out) except Exception as e: if _IS_IN_DEBUG_MODE: warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") self._disable_module_getattr = False self._disable_call_module = False return rv # Replaced by .getattr from PyTorch 1.13 def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "_disable_module_getattr", False): return attr_val else: def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: if n not in parameter_proxy_cache: kwargs = {} if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: kwargs["proxy_factory_fn"] = ( None if not self.param_shapes_constant else lambda node: ParameterProxy(self, node, n, attr_val) ) val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] parameter_proxy_cache[n] = val_proxy return parameter_proxy_cache[n] return None if isinstance(attr_val, torch.nn.Parameter): maybe_parameter_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_parameters(), parameter_proxy_cache ) if maybe_parameter_proxy is not None: return maybe_parameter_proxy if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): maybe_buffer_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_buffers(), parameter_proxy_cache ) if maybe_buffer_proxy is not None: return maybe_buffer_proxy return attr_val # Needed for PyTorch 1.13+ def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): return self._module_getattr(attr, attr_val, parameter_proxy_cache) def call_module(self, m, forward, args, kwargs): if getattr(self, "_disable_call_module", False): return forward(*args, **kwargs) self.orig_forward = forward return super().call_module(m, forward, args, kwargs) def proxy(self, node): return HFProxy(node, self) @contextlib.contextmanager def patch_for_tracing(self, root: Union[torch.nn.Module, Callable[..., Any]]): # Patching torch functions self.patched_torch_methods = { target: 
gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH } self.orig_fns = set() for name, (wrapper, orig) in self.patched_torch_methods.items(): setattr(torch, name, wrapper) self.orig_fns.add(orig) # Patching classes patched = [] module_of_model = inspect.getmodule(root) for name, mod in sys.modules.items(): if module_of_model is not None and mod is not module_of_model: continue if not name.startswith("transformers"): continue for orig_cls, patched_cls in self._CLASSES_TO_PATCH.items(): for attr_name, attr in mod.__dict__.items(): if attr is orig_cls: patched.append((mod, attr_name, orig_cls)) setattr(mod, attr_name, patched_cls) yield # Restoring patched functions and classes. for name, (_, orig) in self.patched_torch_methods.items(): setattr(torch, name, orig) self.patched_torch_methods = {} self.orig_fns = set() for mod, attr_name, orig_cls in patched: setattr(mod, attr_name, orig_cls) def trace( self, root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[Dict[str, Any]] = None, dummy_inputs: Optional[Dict[str, Any]] = None, complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True, ) -> Graph: """ Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a `torch.nn.Module` instance to use as the root and add embedded constants to. Args: root (`torch.nn.Module` or `Callable`): Either a `torch.nn.Module`` or a function to be traced through. If root is not a [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail. concrete_args (`Dict[str, Any], *optional*): Concrete arguments that should not be treated as Proxies dummy_inputs (`Dict[str, Any]`, *optional*): The dummy inputs needed to handle data-dependent control-flow if `root` is not a [`~transformers.PreTrainedModel`]. It can also be used when `root` is a [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs. complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`): If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing. Returns: `torch.fx.Graph`: A FX `torch.fx.Graph` representing the semantics of the passed-in `root`. """ sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root) if concrete_args is None: concrete_args = {} if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs: for param in sig.parameters.values(): if param.name in dummy_inputs: continue if param.default is inspect.Parameter.empty: raise ValueError(f"You need to specify a default value for the parameter {param.name}.") concrete_args.update( { p.name: p.default for p in sig.parameters.values() if (p.name not in dummy_inputs and p.name not in concrete_args) } ) input_names = sig.parameters.keys() - concrete_args.keys() # Creating a random input shape to generate dummy inputs. 
batch_size = _generate_random_int() sequence_length = _generate_random_int() shape = [batch_size, sequence_length] if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): num_choices = _generate_random_int(low=2, high=5) shape.insert(1, num_choices) inputs = dict(dummy_inputs) if dummy_inputs is not None else {} for input_name in input_names: if input_name in inputs: continue # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to # be able to use HFTracer._generate_dummy_input. if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith( ("_deserialize_graph_module", "_CodeOnlyModule") ): inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names)) else: raise RuntimeError( f"Could not generate input named {input_name} for because root is not a" " transformers.PreTrainedModel." ) def to_meta(value): if isinstance(value, torch.Tensor): return value.to("meta") return value concrete_metas = pytree.tree_map(to_meta, inputs) for param in sig.parameters.values(): if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names: concrete_metas[f"**{param.name}"] = {} self.meta_args = concrete_metas global _CURRENT_TRACER _CURRENT_TRACER = self with self.patch_for_tracing(root): try: self.graph = super().trace(root, concrete_args=concrete_args) finally: _CURRENT_TRACER = None # This is necessary because concrete args are added as input to the traced module since # https://github.com/pytorch/pytorch/pull/55888. for node in self.graph.nodes: if node.op == "placeholder": # Removing default values for inputs as the forward pass will fail with them. if node.target in input_names: node.args = () # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor]. # It cannot infer on the attributes and methods the input should have, and fails. node.type = torch.Tensor # It is a concrete arg so it is not used and should be removed. else: to_visit = [node] to_delete = collections.OrderedDict() while to_visit: n = to_visit.pop(0) to_delete[n] = None to_visit += list(n.users.keys()) for user in reversed(to_delete.keys()): self.graph.erase_node(user) # TODO: solves GraphModule creation. # Without this, return type annotation "Tuple" is causing code execution failure. if node.op == "output": node.type = None return self.graph def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool: """ Whether the module was instantiated with Proxies. If that is the case, such module cannot be a leaf module because its attributes are input-dependent. """ return any(isinstance(attr, Proxy) for attr in mod.__dict__.values()) def _insert_module_as_submodule(self, mod: nn.Module) -> str: """ Helper method which tries to insert a module that was not declared as submodule. """ # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent. # It is not possible to insert such modules, those should be traced through. if self._stateless_mod_instanciation_depends_on_proxies(mod): return "" idx = 0 mod_name = mod.__class__.__name__.lower() path = f"{mod_name}_{idx}" already_inserted = False while hasattr(self.root, path): if getattr(self.root, path) is mod: already_inserted = True break path = f"{mod_name}_{idx}" idx += 1 # No need to add multiple instances of the same module. 
if not already_inserted: self.root.add_module(path, mod) return path def path_of_module(self, mod: nn.Module) -> str: """ Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the string "foo.bar". Args: mod (str): The `Module` to retrieve the qualified name for. """ try: return super().path_of_module(mod) except NameError as e: if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: path = self._insert_module_as_submodule(mod) return path raise e def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module( m, module_qualified_name ) @compatibility(is_backward_compatible=True) def keys(self, obj: "Proxy") -> Any: """Called when a proxy object is has the keys() method called. This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in your custom tracer. """ attribute = HFAttribute(obj, "keys")() if obj.node.target == "**kwargs": return attribute._metadata return attribute def get_concrete_args(model: nn.Module, input_names: List[str]): sig = inspect.signature(model.forward) if not (set(input_names) <= set(sig.parameters.keys())): formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names) formatted_allowed_input_names = ", ".join(sig.parameters.keys()) raise ValueError( f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:" f" {formatted_allowed_input_names}" ) return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names} def is_model_supported(model: "PreTrainedModel"): return model.__class__.__name__ in _SUPPORTED_MODELS def check_if_model_is_supported(model: "PreTrainedModel"): if not is_model_supported(model): supported_model_names = ", ".join(_SUPPORTED_MODELS) raise NotImplementedError( f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}" ) def symbolic_trace( model: "PreTrainedModel", input_names: Optional[List[str]] = None, disable_check: bool = False, tracer_cls: Type[HFTracer] = HFTracer, ) -> GraphModule: """ Performs symbolic tracing on the model. Args: model ([`PretrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes. tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`): The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. 
Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) ``` """ if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) if "past_key_values" in input_names and not getattr(model.config, "use_cache", False): logger.warning( "`past_key_values` were specified as input names, but model.config.use_cache = False, this might lead to " "unexpected behavior." ) if "past_key_values" not in input_names and getattr(model.config, "use_cache", False): logger.warning( "`past_key_values` were not specified as input names, but model.config.use_cache = True. Setting " "model.config.use_cache = False." ) model.config.use_cache = False # Tracing. tracer = tracer_cls() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus # _generate_dummy_input, where the model class is needed. traced.class_for_deserialization = model.__class__ traced.device = model.device return traced
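# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, self-contained example of `symbolic_trace` on a small, randomly initialized
# BERT model (BERT is in the supported-model list above); the sizes and input names below
# are arbitrary choices for illustration, not values mandated by the library.
if __name__ == "__main__":
    from transformers import BertConfig, BertModel

    demo_config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
    demo_model = BertModel(demo_config)

    # Tracing records the forward pass into a torch.fx.GraphModule; the traced module can
    # then be called with real tensors of arbitrary batch size and sequence length.
    demo_traced = symbolic_trace(demo_model, input_names=["input_ids", "attention_mask"])

    demo_input_ids = torch.zeros(2, 8, dtype=torch.long)
    demo_attention_mask = torch.ones(2, 8, dtype=torch.long)
    demo_outputs = demo_traced(input_ids=demo_input_ids, attention_mask=demo_attention_mask)

    # Depending on the model, the traced output is dict-like or a tuple; both are handled here.
    demo_last_hidden = demo_outputs["last_hidden_state"] if isinstance(demo_outputs, dict) else demo_outputs[0]
    print(demo_last_hidden.shape)  # expected: torch.Size([2, 8, 32])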
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/generic.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Generic utilities """ import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields, is_dataclass from enum import Enum from functools import partial from typing import Any, ContextManager, Iterable, List, Tuple import numpy as np from packaging import version from .import_utils import ( get_torch_version, is_flax_available, is_mlx_available, is_tf_available, is_torch_available, is_torch_fx_proxy, ) class cached_property(property): """ Descriptor that mimics @property but caches output in member variable. From tensorflow_datasets Built-in in functools from Python 3.8. """ def __get__(self, obj, objtype=None): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached # vendored from distutils.util def strtobool(val): """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. """ val = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"invalid truth value {val!r}") def infer_framework_from_repr(x): """ Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the frameworks in a smart order, without the need to import the frameworks). """ representation = str(type(x)) if representation.startswith("<class 'torch."): return "pt" elif representation.startswith("<class 'tensorflow."): return "tf" elif representation.startswith("<class 'jax"): return "jax" elif representation.startswith("<class 'numpy."): return "np" elif representation.startswith("<class 'mlx."): return "mlx" def _get_frameworks_and_test_func(x): """ Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework we can guess from the repr first, then Numpy, then the others. """ framework_to_test = { "pt": is_torch_tensor, "tf": is_tf_tensor, "jax": is_jax_tensor, "np": is_numpy_array, "mlx": is_mlx_array, } preferred_framework = infer_framework_from_repr(x) # We will test this one first, then numpy, then the others. 
frameworks = [] if preferred_framework is None else [preferred_framework] if preferred_framework != "np": frameworks.append("np") frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]]) return {f: framework_to_test[f] for f in frameworks} def is_tensor(x): """ Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray`, `np.ndarray` or `mlx.array` in the order defined by `infer_framework_from_repr` """ # This gives us a smart order to test the frameworks with the corresponding tests. framework_to_test_func = _get_frameworks_and_test_func(x) for test_func in framework_to_test_func.values(): if test_func(x): return True # Tracers if is_torch_fx_proxy(x): return True if is_flax_available(): from jax.core import Tracer if isinstance(x, Tracer): return True return False def _is_numpy(x): return isinstance(x, np.ndarray) def is_numpy_array(x): """ Tests if `x` is a numpy array or not. """ return _is_numpy(x) def _is_torch(x): import torch return isinstance(x, torch.Tensor) def is_torch_tensor(x): """ Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch(x) def _is_torch_device(x): import torch return isinstance(x, torch.device) def is_torch_device(x): """ Tests if `x` is a torch device or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch_device(x) def _is_torch_dtype(x): import torch if isinstance(x, str): if hasattr(torch, x): x = getattr(torch, x) else: return False return isinstance(x, torch.dtype) def is_torch_dtype(x): """ Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed. """ return False if not is_torch_available() else _is_torch_dtype(x) def _is_tensorflow(x): import tensorflow as tf return isinstance(x, tf.Tensor) def is_tf_tensor(x): """ Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed. """ return False if not is_tf_available() else _is_tensorflow(x) def _is_tf_symbolic_tensor(x): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(tf, "is_symbolic_tensor"): return tf.is_symbolic_tensor(x) return type(x) == tf.Tensor def is_tf_symbolic_tensor(x): """ Tests if `x` is a tensorflow symbolic tensor or not (ie. not eager). Safe to call even if tensorflow is not installed. """ return False if not is_tf_available() else _is_tf_symbolic_tensor(x) def _is_jax(x): import jax.numpy as jnp # noqa: F811 return isinstance(x, jnp.ndarray) def is_jax_tensor(x): """ Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed. """ return False if not is_flax_available() else _is_jax(x) def _is_mlx(x): import mlx.core as mx return isinstance(x, mx.array) def is_mlx_array(x): """ Tests if `x` is a mlx array or not. Safe to call even when mlx is not installed. """ return False if not is_mlx_available() else _is_mlx(x) def to_py_obj(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list. 
""" framework_to_py_obj = { "pt": lambda obj: obj.detach().cpu().tolist(), "tf": lambda obj: obj.numpy().tolist(), "jax": lambda obj: np.asarray(obj).tolist(), "np": lambda obj: obj.tolist(), } if isinstance(obj, (dict, UserDict)): return {k: to_py_obj(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return [to_py_obj(o) for o in obj] # This gives us a smart order to test the frameworks with the corresponding tests. framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_py_obj[framework](obj) # tolist also works on 0d np arrays if isinstance(obj, np.number): return obj.tolist() else: return obj def to_numpy(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array. """ framework_to_numpy = { "pt": lambda obj: obj.detach().cpu().numpy(), "tf": lambda obj: obj.numpy(), "jax": lambda obj: np.asarray(obj), "np": lambda obj: obj, } if isinstance(obj, (dict, UserDict)): return {k: to_numpy(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return np.array(obj) # This gives us a smart order to test the frameworks with the corresponding tests. framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_numpy[framework](obj) return obj class ModelOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular python dictionary. <Tip warning={true}> You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple before. </Tip> """ def __init_subclass__(cls) -> None: """Register subclasses as pytree nodes. This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with `static_graph=True` with modules that output `ModelOutput` subclasses. """ if is_torch_available(): if version.parse(get_torch_version()) >= version.parse("2.2"): _torch_pytree.register_pytree_node( cls, _model_output_flatten, partial(_model_output_unflatten, output_type=cls), serialized_type_name=f"{cls.__module__}.{cls.__name__}", ) else: _torch_pytree._register_pytree_node( cls, _model_output_flatten, partial(_model_output_unflatten, output_type=cls), ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Subclasses of ModelOutput must use the @dataclass decorator # This check is done in __init__ because the @dataclass decorator operates after __init_subclass__ # issubclass() would return True for issubclass(ModelOutput, ModelOutput) when False is needed # Just need to check that the current class is not ModelOutput is_modeloutput_subclass = self.__class__ != ModelOutput if is_modeloutput_subclass and not is_dataclass(self): raise TypeError( f"{self.__module__}.{self.__class__.__name__} is not a dataclasss." " This is a subclass of ModelOutput and so must use the @dataclass decorator." ) def __post_init__(self): """Check the ModelOutput dataclass. Only occurs if @dataclass decorator has been used. 
""" class_fields = fields(self) # Safety and consistency checks if not len(class_fields): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): if isinstance(first_field, dict): iterator = first_field.items() first_field_iterator = True else: try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(iterator): if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute self[class_fields[0].name] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." ) break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] elif first_field is not None: self[class_fields[0].name] = first_field else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def __reduce__(self): if not is_dataclass(self): return super().__reduce__() callable, _args, *remaining = super().__reduce__() args = tuple(getattr(self, field.name) for field in fields(self)) return callable, args, *remaining def to_tuple(self) -> Tuple[Any]: """ Convert self to a tuple containing all the attributes/keys that are not `None`. 
""" return tuple(self[k] for k in self.keys()) if is_torch_available(): import torch.utils._pytree as _torch_pytree def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]: return list(output.values()), list(output.keys()) def _model_output_unflatten( values: Iterable[Any], context: "_torch_pytree.Context", output_type=None, ) -> ModelOutput: return output_type(**dict(zip(context, values))) if version.parse(get_torch_version()) >= version.parse("2.2"): _torch_pytree.register_pytree_node( ModelOutput, _model_output_flatten, partial(_model_output_unflatten, output_type=ModelOutput), serialized_type_name=f"{ModelOutput.__module__}.{ModelOutput.__name__}", ) else: _torch_pytree._register_pytree_node( ModelOutput, _model_output_flatten, partial(_model_output_unflatten, output_type=ModelOutput), ) class ExplicitEnum(str, Enum): """ Enum with more explicit error message for missing values. """ @classmethod def _missing_(cls, value): raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}" ) class PaddingStrategy(ExplicitEnum): """ Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ LONGEST = "longest" MAX_LENGTH = "max_length" DO_NOT_PAD = "do_not_pad" class TensorType(ExplicitEnum): """ Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" JAX = "jax" MLX = "mlx" class ContextManagers: """ Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers` in the `fastcore` library. """ def __init__(self, context_managers: List[ContextManager]): self.context_managers = context_managers self.stack = ExitStack() def __enter__(self): for context_manager in self.context_managers: self.stack.enter_context(context_manager) def __exit__(self, *args, **kwargs): self.stack.__exit__(*args, **kwargs) def can_return_loss(model_class): """ Check if a given model can return loss. Args: model_class (`type`): The class of the model. """ framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": signature = inspect.signature(model_class.forward) # PyTorch models else: signature = inspect.signature(model_class.__call__) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def find_labels(model_class): """ Find the labels used by a given model. Args: model_class (`type`): The class of the model. 
""" model_name = model_class.__name__ framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) # TensorFlow models elif framework == "pt": signature = inspect.signature(model_class.forward) # PyTorch models else: signature = inspect.signature(model_class.__call__) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."): """Flatten a nested dict into a single level dict.""" def _flatten_dict(d, parent_key="", delimiter="."): for k, v in d.items(): key = str(parent_key) + delimiter + str(k) if parent_key else k if v and isinstance(v, MutableMapping): yield from flatten_dict(v, key, delimiter=delimiter).items() else: yield key, v return dict(_flatten_dict(d, parent_key, delimiter)) @contextmanager def working_or_temp_dir(working_dir, use_temp_dir: bool = False): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def transpose(array, axes=None): """ Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.transpose(array, axes=axes) elif is_torch_tensor(array): return array.T if axes is None else array.permute(*axes) elif is_tf_tensor(array): import tensorflow as tf return tf.transpose(array, perm=axes) elif is_jax_tensor(array): import jax.numpy as jnp return jnp.transpose(array, axes=axes) else: raise ValueError(f"Type not supported for transpose: {type(array)}.") def reshape(array, newshape): """ Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.reshape(array, newshape) elif is_torch_tensor(array): return array.reshape(*newshape) elif is_tf_tensor(array): import tensorflow as tf return tf.reshape(array, newshape) elif is_jax_tensor(array): import jax.numpy as jnp return jnp.reshape(array, newshape) else: raise ValueError(f"Type not supported for reshape: {type(array)}.") def squeeze(array, axis=None): """ Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.squeeze(array, axis=axis) elif is_torch_tensor(array): return array.squeeze() if axis is None else array.squeeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.squeeze(array, axis=axis) elif is_jax_tensor(array): import jax.numpy as jnp return jnp.squeeze(array, axis=axis) else: raise ValueError(f"Type not supported for squeeze: {type(array)}.") def expand_dims(array, axis): """ Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. """ if is_numpy_array(array): return np.expand_dims(array, axis) elif is_torch_tensor(array): return array.unsqueeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.expand_dims(array, axis=axis) elif is_jax_tensor(array): import jax.numpy as jnp return jnp.expand_dims(array, axis=axis) else: raise ValueError(f"Type not supported for expand_dims: {type(array)}.") def tensor_size(array): """ Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays. 
""" if is_numpy_array(array): return np.size(array) elif is_torch_tensor(array): return array.numel() elif is_tf_tensor(array): import tensorflow as tf return tf.size(array) elif is_jax_tensor(array): return array.size else: raise ValueError(f"Type not supported for tensor_size: {type(array)}.") def add_model_info_to_auto_map(auto_map, repo_id): """ Adds the information of the repo_id to a given auto map. """ for key, value in auto_map.items(): if isinstance(value, (tuple, list)): auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: auto_map[key] = f"{repo_id}--{value}" return auto_map def infer_framework(model_class): """ Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant classes are imported or available. """ for base_class in inspect.getmro(model_class): module = base_class.__module__ name = base_class.__name__ if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch") or name == "PreTrainedModel": return "pt" elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"Could not infer framework from class {model_class}.")
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/notebook.py
# coding=utf-8 # Copyright 2020 Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def format_time(t): "Format `t` (in seconds) to (h):mm:ss" t = int(t) h, m, s = t // 3600, (t // 60) % 60, t % 60 return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}" def html_progress_bar(value, total, prefix, label, width=300): # docstyle-ignore return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def text_to_html_table(items): "Put the texts in `items` in an HTML table." html_code = """<table border="1" class="dataframe">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt) html_code += f" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class NotebookProgressBar: """ A progress par for display in a notebook. Class attributes (overridden by derived classes) - **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`. - **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed `update_every` seconds. The progress bar uses the average time passed up until now to guess the next value for which it will call the update. Args: total (`int`): The total number of iterations to reach. prefix (`str`, *optional*): A prefix to add before the progress bar. leave (`bool`, *optional*, defaults to `True`): Whether or not to leave the progress bar once it's completed. You can always call the [`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear. parent ([`~notebook.NotebookTrainingTracker`], *optional*): A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle their display. If set, the object passed must have a `display()` method. width (`int`, *optional*, defaults to 300): The width (in pixels) that the bar will take. Example: ```python import time pbar = NotebookProgressBar(100) for val in range(100): pbar.update(val) time.sleep(0.07) pbar.update(100) ```""" warmup = 5 update_every = 0.2 def __init__( self, total: int, prefix: Optional[str] = None, leave: bool = True, parent: Optional["NotebookTrainingTracker"] = None, width: int = 300, ): self.total = total self.prefix = "" if prefix is None else prefix self.leave = leave self.parent = parent self.width = width self.last_value = None self.comment = None self.output = None def update(self, value: int, force_update: bool = False, comment: str = None): """ The main method to update the progress bar to `value`. Args: value (`int`): The value to use. 
Must be between 0 and `total`. force_update (`bool`, *optional*, defaults to `False`): Whether or not to force and update of the internal state and display (by default, the bar will wait for `value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute since the last update to avoid adding boilerplate). comment (`str`, *optional*): A comment to add on the left of the progress bar. """ self.value = value if comment is not None: self.comment = comment if self.last_value is None: self.start_time = self.last_time = time.time() self.start_value = self.last_value = value self.elapsed_time = self.predicted_remaining = None self.first_calls = self.warmup self.wait_for = 1 self.update_bar(value) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total): if self.first_calls > 0: self.first_calls -= 1 current_time = time.time() self.elapsed_time = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: self.average_time_per_item = self.elapsed_time / (value - self.start_value) else: self.average_time_per_item = None if value >= self.total: value = self.total self.predicted_remaining = None if not self.leave: self.close() elif self.average_time_per_item is not None: self.predicted_remaining = self.average_time_per_item * (self.total - value) self.update_bar(value) self.last_value = value self.last_time = current_time if (self.average_time_per_item is None) or (self.average_time_per_item == 0): self.wait_for = 1 else: self.wait_for = max(int(self.update_every / self.average_time_per_item), 1) def update_bar(self, value, comment=None): spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value) if self.elapsed_time is None: self.label = f"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}" else: self.label = ( f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <" f" {format_time(self.predicted_remaining)}" ) if self.average_time_per_item == 0: self.label += ", +inf it/s" else: self.label += f", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]" self.display() def display(self): self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: self.output = disp.display(disp.HTML(self.html_code), display_id=True) else: self.output.update(disp.HTML(self.html_code)) def close(self): "Closes the progress bar." if self.parent is None and self.output is not None: self.output.update(disp.HTML("")) class NotebookTrainingTracker(NotebookProgressBar): """ An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics. Args: num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*): The list of column names for the metrics table (will be inferred from the first call to [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set). 
""" def __init__(self, num_steps, column_names=None): super().__init__(num_steps) self.inner_table = None if column_names is None else [column_names] self.child_bar = None def display(self): self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: self.output = disp.display(disp.HTML(self.html_code), display_id=True) else: self.output.update(disp.HTML(self.html_code)) def write_line(self, values): """ Write the values in the inner table. Args: values (`Dict[str, float]`): The values to display. """ if self.inner_table is None: self.inner_table = [list(values.keys()), list(values.values())] else: columns = self.inner_table[0] for key in values.keys(): if key not in columns: columns.append(key) self.inner_table[0] = columns if len(self.inner_table) > 1: last_values = self.inner_table[-1] first_column = self.inner_table[0][0] if last_values[0] != values[first_column]: # write new line self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) else: # update last line new_values = values for c in columns: if c not in new_values.keys(): new_values[c] = last_values[columns.index(c)] self.inner_table[-1] = [new_values[c] for c in columns] else: self.inner_table.append([values[c] for c in columns]) def add_child(self, total, prefix=None, width=300): """ Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be easily updated). Args: total (`int`): The number of iterations for the child progress bar. prefix (`str`, *optional*): A prefix to write on the left of the progress bar. width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar. """ self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width) return self.child_bar def remove_child(self): """ Closes the child progress bar. """ self.child_bar = None self.display() class NotebookProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or Google colab. 
""" def __init__(self): self.training_tracker = None self.prediction_bar = None self._force_next_update = False def on_train_begin(self, args, state, control, **kwargs): self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step" self.training_loss = 0 self.last_log = 0 column_names = [self.first_column] + ["Training Loss"] if args.eval_strategy != IntervalStrategy.NO: column_names.append("Validation Loss") self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) def on_step_end(self, args, state, control, **kwargs): epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1, comment=f"Epoch {epoch}/{state.num_train_epochs}", force_update=self._force_next_update, ) self._force_next_update = False def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if not has_length(eval_dataloader): return if self.prediction_bar is None: if self.training_tracker is not None: self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader)) else: self.prediction_bar = NotebookProgressBar(len(eval_dataloader)) self.prediction_bar.update(1) else: self.prediction_bar.update(self.prediction_bar.value + 1) def on_predict(self, args, state, control, **kwargs): if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): # Only for when there is no evaluation if args.eval_strategy == IntervalStrategy.NO and "loss" in logs: values = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy values["Step"] = state.global_step self.training_tracker.write_line(values) def on_evaluate(self, args, state, control, metrics=None, **kwargs): if self.training_tracker is not None: values = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history): if "loss" in log: values["Training Loss"] = log["loss"] break if self.first_column == "Epoch": values["Epoch"] = int(state.epoch) else: values["Step"] = state.global_step metric_key_prefix = "eval" for k in metrics: if k.endswith("_loss"): metric_key_prefix = re.sub(r"\_loss$", "", k) _ = metrics.pop("total_flos", None) _ = metrics.pop("epoch", None) _ = metrics.pop(f"{metric_key_prefix}_runtime", None) _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None) _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None) _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None) for k, v in metrics.items(): splits = k.split("_") name = " ".join([part.capitalize() for part in splits[1:]]) if name == "Loss": # Single dataset name = "Validation Loss" values[name] = v self.training_tracker.write_line(values) self.training_tracker.remove_child() self.prediction_bar = None # Evaluation takes a long time so we should force the next update. self._force_next_update = True def on_train_end(self, args, state, control, **kwargs): self.training_tracker.update( state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True, ) self.training_tracker = None
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/dummy_detectron2_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import requires_backends class LayoutLMv2Model: def __init__(self, *args, **kwargs): requires_backends(self, ["detectron2"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["detectron2"])
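A minimal sketch of how the dummy class above behaves at runtime, assuming `detectron2` is not installed in the environment:

```python
from transformers.utils.dummy_detectron2_objects import LayoutLMv2Model

try:
    LayoutLMv2Model()
except ImportError as err:
    # requires_backends raises an ImportError telling the user to install detectron2.
    print(err)
```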
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/hub.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Hub utilities: utilities related to download and cache models """ import json import os import re import shutil import sys import tempfile import traceback import warnings from concurrent import futures from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( _CACHED_NO_EXIST, CommitOperationAdd, ModelCard, ModelCardData, constants, create_branch, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, try_to_load_from_cache, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, GatedRepoError, HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, build_hf_headers, hf_raise_for_status, send_telemetry, ) from huggingface_hub.utils._deprecation import _deprecate_method from requests.exceptions import HTTPError from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) from .logging import tqdm logger = logging.get_logger(__name__) # pylint: disable=invalid-name _is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False def is_offline_mode(): return _is_offline_mode torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) default_cache_path = constants.default_cache_path old_default_cache_path = os.path.join(torch_cache_home, "transformers") # Determine default cache directory. Lots of legacy environment variables to ensure backward compatibility. # The best way to set the cache path is with the environment variable HF_HOME. For more details, checkout this # documentation page: https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables. # # In code, use `HF_HUB_CACHE` as the default cache path. This variable is set by the library and is guaranteed # to be set to the right value. # # TODO: clean this for v5? PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", constants.HF_HUB_CACHE) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) # Onetime move from the old location to the new one if no ENV variable has been set. 
if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(constants.HF_HUB_CACHE) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.22.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/hub'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/hub' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, constants.HF_HUB_CACHE) HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(constants.HF_HOME, "modules")) TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" SESSION_ID = uuid4().hex # Add deprecation warning for old environment variables. for key in ("PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "TRANSFORMERS_CACHE"): if os.getenv(key) is not None: warnings.warn( f"Using `{key}` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.", FutureWarning, ) S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" _staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES _default_endpoint = "https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co" HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT) HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" def _get_cache_file_to_return( path_or_repo_id: str, full_filename: str, cache_dir: Union[str, Path, None] = None, revision: Optional[str] = None ): # We try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file return None def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") # TODO: remove this once fully deprecated # TODO? remove from './examples/research_projects/lxmert/utils.py' as well # TODO? remove from './examples/research_projects/visual_bert/utils.py' as well @_deprecate_method(version="4.39.0", message="This method is outdated and does not support the new cache system.") def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are use to get the metadata for each model, only urls ending with *.bin* are added. Args: cache_dir (`Union[str, Path]`, *optional*): The cache directory to search for models within. Will default to the transformers cache if unset. 
Returns: List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)` """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE elif isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_models = [] for file in os.listdir(cache_dir): if file.endswith(".json"): meta_path = os.path.join(cache_dir, file) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] if url.endswith(".bin"): size_MB = os.path.getsize(meta_path.strip(".json")) / 1e6 cached_models.append((url, etag, size_MB)) return cached_models def define_sagemaker_information(): try: instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json() dlc_container_used = instance_data["Image"] dlc_tag = instance_data["Image"].split(":")[1] except Exception: dlc_container_used = None dlc_tag = None sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}")) runs_distributed_training = True if "sagemaker_distributed_dataparallel_enabled" in sagemaker_params else False account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None sagemaker_object = { "sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None), "sm_region": os.getenv("AWS_REGION", None), "sm_number_gpu": os.getenv("SM_NUM_GPUS", 0), "sm_number_cpu": os.getenv("SM_NUM_CPUS", 0), "sm_distributed_training": runs_distributed_training, "sm_deep_learning_container": dlc_container_used, "sm_deep_learning_container_tag": dlc_tag, "sm_account_id": account_id, } return sagemaker_object def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if constants.HF_HUB_DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]) -> Optional[str]: """ Extracts the commit hash from a resolved filename toward a cache file. 
""" if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", repo_type: Optional[str] = None, user_agent: Optional[Union[str, Dict[str, str]]] = None, _raise_exceptions_for_gated_repo: bool = True, _raise_exceptions_for_missing_entries: bool = True, _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, **deprecated_kwargs, ) -> Optional[str]: """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. repo_type (`str`, *optional*): Specify the repo type (useful when downloading from a space for instance). <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("google-bert/bert-base-uncased", "pytorch_model.bin") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token # Private arguments # _raise_exceptions_for_gated_repo: if False, do not raise an exception for gated repo error but return # None. # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return # None. # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return # None. # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if subfolder is None: subfolder = "" path_or_repo_id = str(path_or_repo_id) full_filename = os.path.join(subfolder, filename) if os.path.isdir(path_or_repo_id): resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries: raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/tree/{revision}' for available files." ) else: return None return resolved_file if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if _commit_hash is not None and not force_download: # If the file is cached under that commit hash, we return it directly. resolved_file = try_to_load_from_cache( path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: return resolved_file elif not _raise_exceptions_for_missing_entries: return None else: raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached resolved_file = hf_hub_download( path_or_repo_id, filename, subfolder=None if len(subfolder) == 0 else subfolder, repo_type=repo_type, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except GatedRepoError as e: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if resolved_file is not None or not _raise_exceptions_for_gated_repo: return resolved_file raise EnvironmentError( "You are trying to access a gated repo.\nMake sure to have access to it at " f"https://huggingface.co/{path_or_repo_id}.\n{str(e)}" ) from e except RepositoryNotFoundError as e: raise EnvironmentError( f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token " "having permission to this repo either by logging in with `huggingface-cli login` or by passing " "`token=<your_token>`" ) from e except RevisionNotFoundError as e: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." 
) from e except LocalEntryNotFoundError as e: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if ( resolved_file is not None or not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors ): return resolved_file raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) from e except EntryNotFoundError as e: if not _raise_exceptions_for_missing_entries: return None if revision is None: revision = "main" raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/tree/{revision}' for available files." ) from e except HTTPError as err: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if resolved_file is not None or not _raise_exceptions_for_connection_errors: return resolved_file raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") except HFValidationError as e: raise EnvironmentError( f"Incorrect path_or_model_id: '{path_or_repo_id}'. Please provide either the path to a local folder or the repo_id of a model on the Hub." ) from e return resolved_file # TODO: deprecate `get_file_from_repo` or document it differently? # Docstring is exactly the same as `cached_repo` but behavior is slightly different. If file is missing or if # there is a connection error, `cached_repo` will return None while `get_file_from_repo` will raise an error. # IMO we should keep only 1 method and have a single `raise_error` argument (to be discussed). def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", **deprecated_kwargs, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. 
If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("google-bert/bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("FacebookAI/xlm-roberta-base", "tokenizer_config.json") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token return cached_file( path_or_repo_id=path_or_repo, filename=filename, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) def download_url(url, proxies=None): """ Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. Returns: `str`: The location of the temporary file where the url was downloaded. """ warnings.warn( f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in" " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note" " that this is not compatible with the caching system (your file will be downloaded at each execution) or" " multiple processes (each process will download the file in a different temporary file).", FutureWarning, ) tmp_fd, tmp_file = tempfile.mkstemp() with os.fdopen(tmp_fd, "wb") as f: http_get(url, f, proxies=proxies) return tmp_file def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, **deprecated_kwargs, ): """ Checks if a repo contains a given file without downloading it. 
Works for remote repos and local folders. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip> """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) url = hf_hub_url(path_or_repo, filename=filename, revision=revision) headers = build_hf_headers(token=token, user_agent=http_user_agent()) r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10) try: hf_raise_for_status(r) return True except GatedRepoError as e: logger.error(e) raise EnvironmentError( f"{path_or_repo} is a gated repository. Make sure to request access at " f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by " "logging in with `huggingface-cli login` or by passing `token=<your_token>`." ) from e except RepositoryNotFoundError as e: logger.error(e) raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.") except RevisionNotFoundError as e: logger.error(e) raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions." ) except requests.HTTPError: # We return false for EntryNotFoundError (logical) as well as any connection error. return False class PushToHubMixin: """ A Mixin containing the functionality to push a model or tokenizer to the hub. """ def _create_repo( self, repo_id: str, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, repo_url: Optional[str] = None, organization: Optional[str] = None, ) -> str: """ Create the repo if needed, cleans up repo_id with deprecated kwargs `repo_url` and `organization`, retrieves the token. """ if repo_url is not None: warnings.warn( "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` " "instead." ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`." ) repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "") if organization is not None: warnings.warn( "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your " "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)." ) if not repo_id.startswith(organization): if "/" in repo_id: repo_id = repo_id.split("/")[-1] repo_id = f"{organization}/{repo_id}" url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True) return url.repo_id def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]): """ Returns the list of files with their last modification timestamp. 
""" return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)} def _upload_modified_files( self, working_dir: Union[str, os.PathLike], repo_id: str, files_timestamps: Dict[str, float], commit_message: Optional[str] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, revision: str = None, commit_description: str = None, ): """ Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`. """ if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Config" in self.__class__.__name__: commit_message = "Upload config" elif "Tokenizer" in self.__class__.__name__: commit_message = "Upload tokenizer" elif "FeatureExtractor" in self.__class__.__name__: commit_message = "Upload feature extractor" elif "Processor" in self.__class__.__name__: commit_message = "Upload processor" else: commit_message = f"Upload {self.__class__.__name__}" modified_files = [ f for f in os.listdir(working_dir) if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f] ] # filter for actual files + folders at the root level modified_files = [ f for f in modified_files if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f)) ] operations = [] # upload standalone files for file in modified_files: if os.path.isdir(os.path.join(working_dir, file)): # go over individual files of folder for f in os.listdir(os.path.join(working_dir, file)): operations.append( CommitOperationAdd( path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f) ) ) else: operations.append( CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file) ) if revision is not None: create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True) logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}") return create_commit( repo_id=repo_id, operations=operations, commit_message=commit_message, commit_description=commit_description, token=token, create_pr=create_pr, revision=revision, ) def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, max_shard_size: Optional[Union[int, str]] = "5GB", create_pr: bool = False, safe_serialization: bool = True, revision: str = None, commit_description: str = None, tags: Optional[List[str]] = None, **deprecated_kwargs, ) -> str: """ Upload the {object_files} to the 🤗 Model Hub. Parameters: repo_id (`str`): The name of the repository you want to push your {object} to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload {object}"`. private (`bool`, *optional*): Whether or not the repository created should be private. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. 
max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to `"5GB"` so that users can easily load models on free-tier Google Colab instances without any CPU OOM issues. create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights in safetensors format for safer serialization. revision (`str`, *optional*): Branch to push the uploaded files to. commit_description (`str`, *optional*): The description of the commit that will be created tags (`List[str]`, *optional*): List of tags to push on the Hub. Examples: ```python from transformers import {object_class} {object} = {object_class}.from_pretrained("google-bert/bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert". {object}.push_to_hub("my-finetuned-bert") # Push the {object} to an organization with the name "my-finetuned-bert". {object}.push_to_hub("huggingface/my-finetuned-bert") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) ignore_metadata_errors = deprecated_kwargs.pop("ignore_metadata_errors", False) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token repo_path_or_name = deprecated_kwargs.pop("repo_path_or_name", None) if repo_path_or_name is not None: # Should use `repo_id` instead of `repo_path_or_name`. When using `repo_path_or_name`, we try to infer # repo_id from the folder path, if it exists. warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead.", FutureWarning, ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_path_or_name` are both specified. Please set only the argument `repo_id`." ) if os.path.isdir(repo_path_or_name): # repo_path: infer repo_id from the path repo_id = repo_id.split(os.path.sep)[-1] working_dir = repo_id else: # repo_name: use it as repo_id repo_id = repo_path_or_name working_dir = repo_id.split("/")[-1] else: # Repo_id is passed correctly: infer working_dir from it working_dir = repo_id.split("/")[-1] # Deprecation warning will be sent after for repo_url and organization repo_url = deprecated_kwargs.pop("repo_url", None) organization = deprecated_kwargs.pop("organization", None) repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) # Create a new empty model card and eventually tag it model_card = create_and_tag_model_card( repo_id, tags, token=token, ignore_metadata_errors=ignore_metadata_errors ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. 
self.save_pretrained(work_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization) # Update model card if needed: model_card.save(os.path.join(work_dir, "README.md")) return self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, revision=revision, commit_description=commit_description, ) def send_example_telemetry(example_name, *example_args, framework="pytorch"): """ Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. """ if is_offline_mode(): return data = {"example": example_name, "framework": framework} for args in example_args: args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} if "model_name_or_path" in args_as_dict: model_name = args_as_dict["model_name_or_path"] # Filter out local paths if not os.path.isdir(model_name): data["model_name"] = args_as_dict["model_name_or_path"] if "dataset_name" in args_as_dict: data["dataset_name"] = args_as_dict["dataset_name"] elif "task_name" in args_as_dict: # Extract script name from the example_name script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") script_name = script_name.replace("_no_trainer", "") data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" # Send telemetry in the background send_telemetry( topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data) ) def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, token=None, user_agent=None, revision=None, subfolder="", _commit_hash=None, **deprecated_kwargs, ): """ For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). 
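    Example (illustrative sketch only: `"org/sharded-model"` is a hypothetical model id assumed to host a
    sharded checkpoint, and `pytorch_model.bin.index.json` is the standard index file name for sharded
    PyTorch checkpoints):

    ```python
    index_file = cached_file("org/sharded-model", "pytorch_model.bin.index.json")
    shard_files, sharded_metadata = get_checkpoint_shard_files("org/sharded-model", index_file)
    ```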
""" import json use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token if not os.path.isfile(index_filename): raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") with open(index_filename, "r") as f: index = json.loads(f.read()) shard_filenames = sorted(set(index["weight_map"].values())) sharded_metadata = index["metadata"] sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) sharded_metadata["weight_map"] = index["weight_map"].copy() # First, let's deal with local folder. if os.path.isdir(pretrained_model_name_or_path): shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames] return shard_filenames, sharded_metadata # At this stage pretrained_model_name_or_path is a model identifier on the Hub cached_filenames = [] # Check if the model is already cached or not. We only try the last checkpoint, this should cover most cases of # downloaded (if interrupted). last_shard = try_to_load_from_cache( pretrained_model_name_or_path, shard_filenames[-1], cache_dir=cache_dir, revision=_commit_hash ) show_progress_bar = last_shard is None or force_download for shard_filename in tqdm(shard_filenames, desc="Downloading shards", disable=not show_progress_bar): try: # Load from URL cached_filename = cached_file( pretrained_model_name_or_path, shard_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=_commit_hash, ) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is " "required according to the checkpoint index." ) except HTTPError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try" " again after checking your internet connection." ) cached_filenames.append(cached_filename) return cached_filenames, sharded_metadata # All what is below is for conversion between old cache format and new cache format. def get_all_cached_files(cache_dir=None): """ Returns a list for all files cached with appropriate metadata. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE else: cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_files = [] for file in os.listdir(cache_dir): meta_path = os.path.join(cache_dir, f"{file}.json") if not os.path.isfile(meta_path): continue with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"].replace('"', "") cached_files.append({"file": file, "url": url, "etag": etag}) return cached_files def extract_info_from_url(url): """ Extract repo_name, revision and filename from an url. 
""" search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url) if search is None: return None repo, revision, filename = search.groups() cache_repo = "--".join(["models"] + repo.split("/")) return {"repo": cache_repo, "revision": revision, "filename": filename} def create_and_tag_model_card( repo_id: str, tags: Optional[List[str]] = None, token: Optional[str] = None, ignore_metadata_errors: bool = False, ): """ Creates or loads an existing model card and tags it. Args: repo_id (`str`): The repo_id where to look for the model card. tags (`List[str]`, *optional*): The list of tags to add in the model card token (`str`, *optional*): Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token. ignore_metadata_errors (`str`): If True, errors while parsing the metadata section will be ignored. Some information might be lost during the process. Use it at your own risk. """ try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id, token=token, ignore_metadata_errors=ignore_metadata_errors) except EntryNotFoundError: # Otherwise create a simple model card from template model_description = "This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated." card_data = ModelCardData(tags=[] if tags is None else tags, library_name="transformers") model_card = ModelCard.from_template(card_data, model_description=model_description) if tags is not None: for model_tag in tags: if model_tag not in model_card.data.tags: model_card.data.tags.append(model_tag) return model_card def clean_files_for(file): """ Remove, if they exist, file, file.json and file.lock """ for f in [file, f"{file}.json", f"{file}.lock"]: if os.path.isfile(f): os.remove(f) def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): """ Move file to repo following the new huggingface hub cache organization. 
""" os.makedirs(repo, exist_ok=True) # refs os.makedirs(os.path.join(repo, "refs"), exist_ok=True) if revision != commit_hash: ref_path = os.path.join(repo, "refs", revision) with open(ref_path, "w") as f: f.write(commit_hash) # blobs os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) blob_path = os.path.join(repo, "blobs", etag) shutil.move(file, blob_path) # snapshots os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True) os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True) pointer_path = os.path.join(repo, "snapshots", commit_hash, filename) huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path) clean_files_for(file) def move_cache(cache_dir=None, new_cache_dir=None, token=None): if new_cache_dir is None: new_cache_dir = TRANSFORMERS_CACHE if cache_dir is None: # Migrate from old cache in .cache/huggingface/transformers old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" if os.path.isdir(str(old_cache)): cache_dir = str(old_cache) else: cache_dir = new_cache_dir cached_files = get_all_cached_files(cache_dir=cache_dir) logger.info(f"Moving {len(cached_files)} files to the new cache system") hub_metadata = {} for file_info in tqdm(cached_files): url = file_info.pop("url") if url not in hub_metadata: try: hub_metadata[url] = get_hf_file_metadata(url, token=token) except requests.HTTPError: continue etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash if etag is None or commit_hash is None: continue if file_info["etag"] != etag: # Cached file is not up to date, we just throw it as a new version will be downloaded anyway. clean_files_for(os.path.join(cache_dir, file_info["file"])) continue url_info = extract_info_from_url(url) if url_info is None: # Not a file from huggingface.co continue repo = os.path.join(new_cache_dir, url_info["repo"]) move_to_new_cache( file=os.path.join(cache_dir, file_info["file"]), repo=repo, filename=url_info["filename"], revision=url_info["revision"], etag=etag, commit_hash=commit_hash, ) class PushInProgress: """ Internal class to keep track of a push in progress (which might contain multiple `Future` jobs). """ def __init__(self, jobs: Optional[futures.Future] = None) -> None: self.jobs = [] if jobs is None else jobs def is_done(self): return all(job.done() for job in self.jobs) def wait_until_done(self): futures.wait(self.jobs) def cancel(self) -> None: self.jobs = [ job for job in self.jobs # Cancel the job if it wasn't started yet and remove cancelled/done jobs from the list if not (job.cancel() or job.done()) ] cache_version_file = os.path.join(TRANSFORMERS_CACHE, "version.txt") if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: try: cache_version = int(f.read()) except ValueError: cache_version = 0 cache_is_not_empty = os.path.isdir(TRANSFORMERS_CACHE) and len(os.listdir(TRANSFORMERS_CACHE)) > 0 if cache_version < 1 and cache_is_not_empty: if is_offline_mode(): logger.warning( "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " "cache seems to be the one of a previous version. It is very likely that all your calls to any " "`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have " "your cache be updated automatically, then you can go back to offline mode." ) else: logger.warning( "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a " "one-time only operation. 
You can interrupt this and resume the migration later on by calling " "`transformers.utils.move_cache()`." ) try: if TRANSFORMERS_CACHE != constants.HF_HUB_CACHE: # Users set some env variable to customize cache storage move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE) else: move_cache() except Exception as e: trace = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " "file an issue at https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(TRANSFORMERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set " "the environment variable TRANSFORMERS_CACHE to a writable directory." )
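# Illustrative sketch (hypothetical helper, not part of the public API and never called by the library):
# how `PushInProgress` keeps track of background upload jobs submitted to an executor.
def _example_push_in_progress_usage():
    from concurrent.futures import ThreadPoolExecutor

    executor = ThreadPoolExecutor(max_workers=1)
    # Stand-in jobs; in practice these would be the upload futures tracked during an asynchronous push.
    jobs = [executor.submit(lambda: None) for _ in range(3)]
    push = PushInProgress(jobs=jobs)

    # Callers can poll `is_done()` or block until every job has finished.
    push.wait_until_done()
    assert push.is_done()

    # `cancel()` removes finished or successfully-cancelled jobs, keeping only jobs still running.
    push.cancel()
    executor.shutdown()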
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/utils/import_utils.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Import utilities: Utilities related to imports and our lazy inits. """ import importlib.metadata import importlib.util import json import os import shutil import subprocess import sys import warnings from collections import OrderedDict from functools import lru_cache from itertools import chain from types import ModuleType from typing import Any, Tuple, Union from packaging import version from . import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name # TODO: This doesn't work for all packages (`bs4`, `faiss`, etc.) Talk to Sylvain to see how to do with it better. def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]: # Check if the package spec exists and grab its version to avoid importing a local directory package_exists = importlib.util.find_spec(pkg_name) is not None package_version = "N/A" if package_exists: try: # Primary method to get the package version package_version = importlib.metadata.version(pkg_name) except importlib.metadata.PackageNotFoundError: # Fallback method: Only for "torch" and versions containing "dev" if pkg_name == "torch": try: package = importlib.import_module(pkg_name) temp_version = getattr(package, "__version__", "N/A") # Check if the version contains "dev" if "dev" in temp_version: package_version = temp_version package_exists = True else: package_exists = False except ImportError: # If the package can't be imported, it's not available package_exists = False else: # For packages other than "torch", don't attempt the fallback and set as not available package_exists = False logger.debug(f"Detected {pkg_name} version: {package_version}") if return_version: return package_exists, package_version else: return package_exists ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() # Try to run a native pytorch job in an environment with TorchXLA installed by setting this value to 0. USE_TORCH_XLA = os.environ.get("USE_TORCH_XLA", "1").upper() FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper() # `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it. # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs. 
TORCH_FX_REQUIRED_VERSION = version.parse("1.10") ACCELERATE_MIN_VERSION = "0.21.0" FSDP_MIN_VERSION = "1.12.0" XLA_FSDPV2_MIN_VERSION = "2.2.0" _accelerate_available, _accelerate_version = _is_package_available("accelerate", return_version=True) _apex_available = _is_package_available("apex") _aqlm_available = _is_package_available("aqlm") _av_available = importlib.util.find_spec("av") is not None _bitsandbytes_available = _is_package_available("bitsandbytes") _eetq_available = _is_package_available("eetq") _galore_torch_available = _is_package_available("galore_torch") # `importlib.metadata.version` doesn't work with `bs4` but `beautifulsoup4`. For `importlib.util.find_spec`, reversed. _bs4_available = importlib.util.find_spec("bs4") is not None _coloredlogs_available = _is_package_available("coloredlogs") # `importlib.metadata.util` doesn't work with `opencv-python-headless`. _cv2_available = importlib.util.find_spec("cv2") is not None _datasets_available = _is_package_available("datasets") _decord_available = importlib.util.find_spec("decord") is not None _detectron2_available = _is_package_available("detectron2") # We need to check both `faiss` and `faiss-cpu`. _faiss_available = importlib.util.find_spec("faiss") is not None try: _faiss_version = importlib.metadata.version("faiss") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib.metadata.PackageNotFoundError: try: _faiss_version = importlib.metadata.version("faiss-cpu") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib.metadata.PackageNotFoundError: _faiss_available = False _ftfy_available = _is_package_available("ftfy") _g2p_en_available = _is_package_available("g2p_en") _ipex_available, _ipex_version = _is_package_available("intel_extension_for_pytorch", return_version=True) _jieba_available = _is_package_available("jieba") _jinja_available = _is_package_available("jinja2") _kenlm_available = _is_package_available("kenlm") _keras_nlp_available = _is_package_available("keras_nlp") _levenshtein_available = _is_package_available("Levenshtein") _librosa_available = _is_package_available("librosa") _natten_available = _is_package_available("natten") _nltk_available = _is_package_available("nltk") _onnx_available = _is_package_available("onnx") _openai_available = _is_package_available("openai") _optimum_available = _is_package_available("optimum") _auto_gptq_available = _is_package_available("auto_gptq") # `importlib.metadata.version` doesn't work with `awq` _auto_awq_available = importlib.util.find_spec("awq") is not None _quanto_available = _is_package_available("quanto") _pandas_available = _is_package_available("pandas") _peft_available = _is_package_available("peft") _phonemizer_available = _is_package_available("phonemizer") _psutil_available = _is_package_available("psutil") _py3nvml_available = _is_package_available("py3nvml") _pyctcdecode_available = _is_package_available("pyctcdecode") _pytesseract_available = _is_package_available("pytesseract") _pytest_available = _is_package_available("pytest") _pytorch_quantization_available = _is_package_available("pytorch_quantization") _rjieba_available = _is_package_available("rjieba") _sacremoses_available = _is_package_available("sacremoses") _safetensors_available = _is_package_available("safetensors") _scipy_available = _is_package_available("scipy") _sentencepiece_available = _is_package_available("sentencepiece") _is_seqio_available = _is_package_available("seqio") _sklearn_available = 
importlib.util.find_spec("sklearn") is not None if _sklearn_available: try: importlib.metadata.version("scikit-learn") except importlib.metadata.PackageNotFoundError: _sklearn_available = False _smdistributed_available = importlib.util.find_spec("smdistributed") is not None _soundfile_available = _is_package_available("soundfile") _spacy_available = _is_package_available("spacy") _sudachipy_available, _sudachipy_version = _is_package_available("sudachipy", return_version=True) _tensorflow_probability_available = _is_package_available("tensorflow_probability") _tensorflow_text_available = _is_package_available("tensorflow_text") _tf2onnx_available = _is_package_available("tf2onnx") _timm_available = _is_package_available("timm") _tokenizers_available = _is_package_available("tokenizers") _torchaudio_available = _is_package_available("torchaudio") _torchdistx_available = _is_package_available("torchdistx") _torchvision_available = _is_package_available("torchvision") _mlx_available = _is_package_available("mlx") _hqq_available = _is_package_available("hqq") _torch_version = "N/A" _torch_available = False if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available, _torch_version = _is_package_available("torch", return_version=True) else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False _tf_version = "N/A" _tf_available = False if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES: _tf_available = True else: if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: # Note: _is_package_available("tensorflow") fails for tensorflow-cpu. Please test any changes to the line below # with tensorflow-cpu to make sure it still works! _tf_available = importlib.util.find_spec("tensorflow") is not None if _tf_available: candidates = ( "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "tf-nightly-rocm", "intel-tensorflow", "intel-tensorflow-avx512", "tensorflow-rocm", "tensorflow-macos", "tensorflow-aarch64", ) _tf_version = None # For the metadata, we have to look for both tensorflow and tensorflow-cpu for pkg in candidates: try: _tf_version = importlib.metadata.version(pkg) break except importlib.metadata.PackageNotFoundError: pass _tf_available = _tf_version is not None if _tf_available: if version.parse(_tf_version) < version.parse("2"): logger.info( f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum." 
) _tf_available = False else: logger.info("Disabling Tensorflow because USE_TORCH is set") _essentia_available = importlib.util.find_spec("essentia") is not None try: _essentia_version = importlib.metadata.version("essentia") logger.debug(f"Successfully imported essentia version {_essentia_version}") except importlib.metadata.PackageNotFoundError: _essentia_version = False _pretty_midi_available = importlib.util.find_spec("pretty_midi") is not None try: _pretty_midi_version = importlib.metadata.version("pretty_midi") logger.debug(f"Successfully imported pretty_midi version {_pretty_midi_version}") except importlib.metadata.PackageNotFoundError: _pretty_midi_available = False ccl_version = "N/A" _is_ccl_available = ( importlib.util.find_spec("torch_ccl") is not None or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None ) try: ccl_version = importlib.metadata.version("oneccl_bind_pt") logger.debug(f"Detected oneccl_bind_pt version {ccl_version}") except importlib.metadata.PackageNotFoundError: _is_ccl_available = False _flax_available = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: _flax_available, _flax_version = _is_package_available("flax", return_version=True) if _flax_available: _jax_available, _jax_version = _is_package_available("jax", return_version=True) if _jax_available: logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") else: _flax_available = _jax_available = False _jax_version = _flax_version = "N/A" _torch_fx_available = False if _torch_available: torch_version = version.parse(_torch_version) _torch_fx_available = (torch_version.major, torch_version.minor) >= ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_xla_available = False if USE_TORCH_XLA in ENV_VARS_TRUE_VALUES: _torch_xla_available, _torch_xla_version = _is_package_available("torch_xla", return_version=True) if _torch_xla_available: logger.info(f"Torch XLA version {_torch_xla_version} available.") def is_kenlm_available(): return _kenlm_available def is_cv2_available(): return _cv2_available def is_torch_available(): return _torch_available def is_hqq_available(): return _hqq_available def get_torch_version(): return _torch_version def is_torch_sdpa_available(): if not is_torch_available(): return False elif _torch_version == "N/A": return False # NOTE: We require torch>=2.1 (and not torch>=2.0) to use SDPA in Transformers for two reasons: # - Allow the global use of the `scale` argument introduced in https://github.com/pytorch/pytorch/pull/95259 # - Memory-efficient attention supports arbitrary attention_mask: https://github.com/pytorch/pytorch/pull/104310 # NOTE: We require torch>=2.1.1 to avoid a numerical issue in SDPA with non-contiguous inputs: https://github.com/pytorch/pytorch/issues/112577 return version.parse(_torch_version) >= version.parse("2.1.1") def is_torchvision_available(): return _torchvision_available def is_galore_torch_available(): return _galore_torch_available def is_pyctcdecode_available(): return _pyctcdecode_available def is_librosa_available(): return _librosa_available def is_essentia_available(): return _essentia_available def is_pretty_midi_available(): return _pretty_midi_available def is_torch_cuda_available(): if is_torch_available(): import torch return torch.cuda.is_available() else: return False def is_mamba_ssm_available(): if is_torch_available(): import torch if not torch.cuda.is_available(): return False else: return _is_package_available("mamba_ssm") return False def 
is_causal_conv1d_available(): if is_torch_available(): import torch if not torch.cuda.is_available(): return False return _is_package_available("causal_conv1d") return False def is_torch_mps_available(): if is_torch_available(): import torch if hasattr(torch.backends, "mps"): return torch.backends.mps.is_available() and torch.backends.mps.is_built() return False def is_torch_bf16_gpu_available(): if not is_torch_available(): return False import torch return torch.cuda.is_available() and torch.cuda.is_bf16_supported() def is_torch_bf16_cpu_available(): if not is_torch_available(): return False import torch try: # multiple levels of AttributeError depending on the pytorch version so do them all in one check _ = torch.cpu.amp.autocast except AttributeError: return False return True def is_torch_bf16_available(): # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util # has become ambiguous and therefore deprecated warnings.warn( "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available " "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu", FutureWarning, ) return is_torch_bf16_gpu_available() @lru_cache() def is_torch_fp16_available_on_device(device): if not is_torch_available(): return False import torch try: x = torch.zeros(2, 2, dtype=torch.float16).to(device) _ = x @ x # At this moment, let's be strict of the check: check if `LayerNorm` is also supported on device, because many # models use this layer. batch, sentence_length, embedding_dim = 3, 4, 5 embedding = torch.randn(batch, sentence_length, embedding_dim, dtype=torch.float16, device=device) layer_norm = torch.nn.LayerNorm(embedding_dim, dtype=torch.float16, device=device) _ = layer_norm(embedding) except: # noqa: E722 # TODO: more precise exception matching, if possible. # most backends should return `RuntimeError` however this is not guaranteed. return False return True @lru_cache() def is_torch_bf16_available_on_device(device): if not is_torch_available(): return False import torch if device == "cuda": return is_torch_bf16_gpu_available() try: x = torch.zeros(2, 2, dtype=torch.bfloat16).to(device) _ = x @ x except: # noqa: E722 # TODO: more precise exception matching, if possible. # most backends should return `RuntimeError` however this is not guaranteed. 
return False return True def is_torch_tf32_available(): if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): return False return True def is_torch_fx_available(): return _torch_fx_available def is_peft_available(): return _peft_available def is_bs4_available(): return _bs4_available def is_tf_available(): return _tf_available def is_coloredlogs_available(): return _coloredlogs_available def is_tf2onnx_available(): return _tf2onnx_available def is_onnx_available(): return _onnx_available def is_openai_available(): return _openai_available def is_flax_available(): return _flax_available def is_ftfy_available(): return _ftfy_available def is_g2p_en_available(): return _g2p_en_available @lru_cache() def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" warnings.warn( "`is_torch_tpu_available` is deprecated and will be removed in 4.41.0. " "Please use the `is_torch_xla_available` instead.", FutureWarning, ) if not _torch_available: return False if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False @lru_cache def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): """ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set the USE_TORCH_XLA to false. """ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." 
if not _torch_xla_available: return False import torch_xla if check_is_gpu: return torch_xla.runtime.device_type() in ["GPU", "CUDA"] elif check_is_tpu: return torch_xla.runtime.device_type() == "TPU" return True @lru_cache() def is_torch_neuroncore_available(check_device=True): if importlib.util.find_spec("torch_neuronx") is not None: return is_torch_xla_available() return False @lru_cache() def is_torch_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if not _torch_available or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() @lru_cache() def is_torch_mlu_available(check_device=False): "Checks if `torch_mlu` is installed and potentially if a MLU is in the environment" if not _torch_available or importlib.util.find_spec("torch_mlu") is None: return False import torch import torch_mlu # noqa: F401 from ..dependency_versions_table import deps deps["deepspeed"] = "deepspeed-mlu>=0.10.1" if check_device: try: # Will raise a RuntimeError if no MLU is found _ = torch.mlu.device_count() return torch.mlu.is_available() except RuntimeError: return False return hasattr(torch, "mlu") and torch.mlu.is_available() def is_torchdynamo_available(): if not is_torch_available(): return False try: import torch._dynamo as dynamo # noqa: F401 return True except Exception: return False def is_torch_compile_available(): if not is_torch_available(): return False import torch # We don't do any version check here to support nighlies marked as 1.14. Ultimately needs to check version against # 2.0 but let's do it later. return hasattr(torch, "compile") def is_torchdynamo_compiling(): if not is_torch_available(): return False try: import torch._dynamo as dynamo # noqa: F401 return dynamo.is_compiling() except Exception: return False def is_torch_tensorrt_fx_available(): if importlib.util.find_spec("torch_tensorrt") is None: return False return importlib.util.find_spec("torch_tensorrt.fx") is not None def is_datasets_available(): return _datasets_available def is_detectron2_available(): return _detectron2_available def is_rjieba_available(): return _rjieba_available def is_psutil_available(): return _psutil_available def is_py3nvml_available(): return _py3nvml_available def is_sacremoses_available(): return _sacremoses_available def is_apex_available(): return _apex_available def is_aqlm_available(): return _aqlm_available def is_av_available(): return _av_available def is_ninja_available(): r""" Code comes from *torch.utils.cpp_extension.is_ninja_available()*. Returns `True` if the [ninja](https://ninja-build.org/) build system is available on the system, `False` otherwise. """ try: subprocess.check_output("ninja --version".split()) except Exception: return False else: return True def is_ipex_available(): def get_major_and_minor_from_version(full_version): return str(version.parse(full_version).major) + "." 
+ str(version.parse(full_version).minor) if not is_torch_available() or not _ipex_available: return False torch_major_and_minor = get_major_and_minor_from_version(_torch_version) ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) if torch_major_and_minor != ipex_major_and_minor: logger.warning( f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." ) return False return True @lru_cache def is_torch_xpu_available(check_device=False): "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if not is_ipex_available(): return False import intel_extension_for_pytorch # noqa: F401 import torch if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() def is_bitsandbytes_available(): if not is_torch_available(): return False # bitsandbytes throws an error if cuda is not available # let's avoid that by adding a simple check import torch return _bitsandbytes_available and torch.cuda.is_available() def is_flash_attn_2_available(): if not is_torch_available(): return False if not _is_package_available("flash_attn"): return False # Let's add an extra check to see if cuda is available import torch if not torch.cuda.is_available(): return False if torch.version.cuda: return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") elif torch.version.hip: # TODO: Bump the requirement to 2.1.0 once released in https://github.com/ROCmSoftwarePlatform/flash-attention return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.0.4") else: return False def is_flash_attn_greater_or_equal_2_10(): if not _is_package_available("flash_attn"): return False return version.parse(importlib.metadata.version("flash_attn")) >= version.parse("2.1.0") def is_torchdistx_available(): return _torchdistx_available def is_faiss_available(): return _faiss_available def is_scipy_available(): return _scipy_available def is_sklearn_available(): return _sklearn_available def is_sentencepiece_available(): return _sentencepiece_available def is_seqio_available(): return _is_seqio_available def is_protobuf_available(): if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None def is_accelerate_available(min_version: str = ACCELERATE_MIN_VERSION): return _accelerate_available and version.parse(_accelerate_version) >= version.parse(min_version) def is_fsdp_available(min_version: str = FSDP_MIN_VERSION): return is_torch_available() and version.parse(_torch_version) >= version.parse(min_version) def is_optimum_available(): return _optimum_available def is_auto_awq_available(): return _auto_awq_available def is_quanto_available(): return _quanto_available def is_auto_gptq_available(): return _auto_gptq_available def is_eetq_available(): return _eetq_available def is_levenshtein_available(): return _levenshtein_available def is_optimum_neuron_available(): return _optimum_available and _is_package_available("optimum.neuron") def is_safetensors_available(): return _safetensors_available def is_tokenizers_available(): return _tokenizers_available @lru_cache def is_vision_available(): _pil_available = importlib.util.find_spec("PIL") is not None if _pil_available: 
try: package_version = importlib.metadata.version("Pillow") except importlib.metadata.PackageNotFoundError: try: package_version = importlib.metadata.version("Pillow-SIMD") except importlib.metadata.PackageNotFoundError: return False logger.debug(f"Detected PIL version {package_version}") return _pil_available def is_pytesseract_available(): return _pytesseract_available def is_pytest_available(): return _pytest_available def is_spacy_available(): return _spacy_available def is_tensorflow_text_available(): return is_tf_available() and _tensorflow_text_available def is_keras_nlp_available(): return is_tensorflow_text_available() and _keras_nlp_available def is_in_notebook(): try: # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0": # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel raise ImportError("databricks") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False def is_pytorch_quantization_available(): return _pytorch_quantization_available def is_tensorflow_probability_available(): return _tensorflow_probability_available def is_pandas_available(): return _pandas_available def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return _smdistributed_available def is_sagemaker_mp_enabled(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return _smdistributed_available def is_training_run_on_sagemaker(): return "SAGEMAKER_JOB_NAME" in os.environ def is_soundfile_availble(): return _soundfile_available def is_timm_available(): return _timm_available def is_natten_available(): return _natten_available def is_nltk_available(): return _nltk_available def is_torchaudio_available(): return _torchaudio_available def is_speech_available(): # For now this depends on torchaudio but the exact dependency might evolve in the future. 
return _torchaudio_available def is_phonemizer_available(): return _phonemizer_available def torch_only_method(fn): def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper def is_ccl_available(): return _is_ccl_available def is_decord_available(): return _decord_available def is_sudachi_available(): return _sudachipy_available def get_sudachi_version(): return _sudachipy_version def is_sudachi_projection_available(): if not is_sudachi_available(): return False # NOTE: We require sudachipy>=0.6.8 to use projection option in sudachi_kwargs for the constructor of BertJapaneseTokenizer. # - `projection` option is not supported in sudachipy<0.6.8, see https://github.com/WorksApplications/sudachi.rs/issues/230 return version.parse(_sudachipy_version) >= version.parse("0.6.8") def is_jumanpp_available(): return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None) def is_cython_available(): return importlib.util.find_spec("pyximport") is not None def is_jieba_available(): return _jieba_available def is_jinja_available(): return _jinja_available def is_mlx_available(): return _mlx_available # docstyle-ignore AV_IMPORT_ERROR = """ {0} requires the PyAv library but it was not found in your environment. You can install it with: ``` pip install av ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore CV2_IMPORT_ERROR = """ {0} requires the OpenCV library but it was not found in your environment. You can install it with: ``` pip install opencv-python ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: ``` pip install datasets ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install datasets ``` then restarting your kernel. Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or that python file if that's the case. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TOKENIZERS_IMPORT_ERROR = """ {0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with: ``` pip install tokenizers ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install tokenizers ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = """ {0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PROTOBUF_IMPORT_ERROR = """ {0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones that match your environment. 
Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FAISS_IMPORT_ERROR = """ {0} requires the faiss library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_IMPORT_ERROR = """ {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TORCHVISION_IMPORT_ERROR = """ {0} requires the Torchvision library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_IMPORT_ERROR_WITH_TF = """ {0} requires the PyTorch library but it was not found in your environment. However, we were able to find a TensorFlow installation. TensorFlow classes begin with "TF", but are otherwise identically named to our PyTorch classes. This means that the TF equivalent of the class you tried to import would be "TF{0}". If you want to use TensorFlow, please use TF classes instead! If you really do want to use PyTorch please go to https://pytorch.org/get-started/locally/ and follow the instructions that match your environment. """ # docstyle-ignore TF_IMPORT_ERROR_WITH_PYTORCH = """ {0} requires the TensorFlow library but it was not found in your environment. However, we were able to find a PyTorch installation. PyTorch classes do not begin with "TF", but are otherwise identically named to our TF classes. If you want to use PyTorch, please use those classes instead! If you really do want to use TensorFlow, please follow the instructions on the installation page https://www.tensorflow.org/install that match your environment. """ # docstyle-ignore BS4_IMPORT_ERROR = """ {0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: `pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SKLEARN_IMPORT_ERROR = """ {0} requires the scikit-learn library but it was not found in your environment. You can install it with: ``` pip install -U scikit-learn ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install -U scikit-learn ``` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_IMPORT_ERROR = """ {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore DETECTRON2_IMPORT_ERROR = """ {0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones that match your environment. 
Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FTFY_IMPORT_ERROR = """ {0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ LEVENSHTEIN_IMPORT_ERROR = """ {0} requires the python-Levenshtein library but it was not found in your environment. You can install it with pip: `pip install python-Levenshtein`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore G2P_EN_IMPORT_ERROR = """ {0} requires the g2p-en library but it was not found in your environment. You can install it with pip: `pip install g2p-en`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTORCH_QUANTIZATION_IMPORT_ERROR = """ {0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip: `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_PROBABILITY_IMPORT_ERROR = """ {0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TENSORFLOW_TEXT_IMPORT_ERROR = """ {0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as explained here: https://www.tensorflow.org/text/guide/tf_text_intro. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PANDAS_IMPORT_ERROR = """ {0} requires the pandas library but it was not found in your environment. You can install it with pip as explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PHONEMIZER_IMPORT_ERROR = """ {0} requires the phonemizer library but it was not found in your environment. You can install it with pip: `pip install phonemizer`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SACREMOSES_IMPORT_ERROR = """ {0} requires the sacremoses library but it was not found in your environment. You can install it with pip: `pip install sacremoses`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SCIPY_IMPORT_ERROR = """ {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore SPEECH_IMPORT_ERROR = """ {0} requires the torchaudio library but it was not found in your environment. You can install it with pip: `pip install torchaudio`. 
Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TIMM_IMPORT_ERROR = """ {0} requires the timm library but it was not found in your environment. You can install it with pip: `pip install timm`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore NATTEN_IMPORT_ERROR = """ {0} requires the natten library but it was not found in your environment. You can install it by referring to: shi-labs.com/natten . You can also install it with pip (may take longer to build): `pip install natten`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore NLTK_IMPORT_ERROR = """ {0} requires the NLTK library but it was not found in your environment. You can install it by referring to: https://www.nltk.org/install.html. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore VISION_IMPORT_ERROR = """ {0} requires the PIL library but it was not found in your environment. You can install it with pip: `pip install pillow`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYTESSERACT_IMPORT_ERROR = """ {0} requires the PyTesseract library but it was not found in your environment. You can install it with pip: `pip install pytesseract`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PYCTCDECODE_IMPORT_ERROR = """ {0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip: `pip install pyctcdecode`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore ACCELERATE_IMPORT_ERROR = """ {0} requires the accelerate library >= {ACCELERATE_MIN_VERSION} it was not found in your environment. You can install or update it with pip: `pip install --upgrade accelerate`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore CCL_IMPORT_ERROR = """ {0} requires the torch ccl library but it was not found in your environment. You can install it with pip: `pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore ESSENTIA_IMPORT_ERROR = """ {0} requires essentia library. But that was not found in your environment. You can install them with pip: `pip install essentia==2.1b6.dev1034` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore LIBROSA_IMPORT_ERROR = """ {0} requires thes librosa library. But that was not found in your environment. You can install them with pip: `pip install librosa` Please note that you may need to restart your runtime after installation. """ # docstyle-ignore PRETTY_MIDI_IMPORT_ERROR = """ {0} requires thes pretty_midi library. But that was not found in your environment. You can install them with pip: `pip install pretty_midi` Please note that you may need to restart your runtime after installation. """ DECORD_IMPORT_ERROR = """ {0} requires the decord library but it was not found in your environment. You can install it with pip: `pip install decord`. Please note that you may need to restart your runtime after installation. """ CYTHON_IMPORT_ERROR = """ {0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install Cython`. Please note that you may need to restart your runtime after installation. 
""" JIEBA_IMPORT_ERROR = """ {0} requires the jieba library but it was not found in your environment. You can install it with pip: `pip install jieba`. Please note that you may need to restart your runtime after installation. """ PEFT_IMPORT_ERROR = """ {0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft`. Please note that you may need to restart your runtime after installation. """ JINJA_IMPORT_ERROR = """ {0} requires the jinja library but it was not found in your environment. You can install it with pip: `pip install jinja2`. Please note that you may need to restart your runtime after installation. """ BACKENDS_MAPPING = OrderedDict( [ ("av", (is_av_available, AV_IMPORT_ERROR)), ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), ("cv2", (is_cv2_available, CV2_IMPORT_ERROR)), ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("essentia", (is_essentia_available, ESSENTIA_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("g2p_en", (is_g2p_en_available, G2P_EN_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("pretty_midi", (is_pretty_midi_available, PRETTY_MIDI_IMPORT_ERROR)), ("levenshtein", (is_levenshtein_available, LEVENSHTEIN_IMPORT_ERROR)), ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)), ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)), ("decord", (is_decord_available, DECORD_IMPORT_ERROR)), ("cython", (is_cython_available, CYTHON_IMPORT_ERROR)), ("jieba", (is_jieba_available, JIEBA_IMPORT_ERROR)), ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("jinja", (is_jinja_available, JINJA_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ # Raise an error for users who might not realize that classes without "TF" are torch-only if "torch" in backends and "tf" not in backends 
and not is_torch_available() and is_tf_available(): raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name)) # Raise the inverse error for PyTorch users trying to load TF classes if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available(): raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name)) checks = (BACKENDS_MAPPING[backend] for backend in backends) failed = [msg.format(name) for available, msg in checks if not available()] if failed: raise ImportError("".join(failed)) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ def __getattribute__(cls, key): if key.startswith("_") and key != "_from_config": return super().__getattribute__(key) requires_backends(cls, cls._backends) def is_torch_fx_proxy(x): if is_torch_fx_available(): import torch.fx return isinstance(x, torch.fx.Proxy) return False class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. """ # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): for value in values: self._class_to_module[value] = key # Needed for autocompletion in an IDE self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): result = super().__dir__() # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._modules: value = self._get_module(name) elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) else: raise AttributeError(f"module {self.__name__} has no attribute {name}") setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module("." + module_name, self.__name__) except Exception as e: raise RuntimeError( f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" f" traceback):\n{e}" ) from e def __reduce__(self): return (self.__class__, (self._name, self.__file__, self._import_structure)) class OptionalDependencyNotAvailable(BaseException): """Internally used error class for signalling an optional dependency was not found.""" def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: """Imports transformers directly Args: path (`str`): The path to the source file file (`str`, optional): The file to join with the path. Defaults to "__init__.py". 
Returns: `ModuleType`: The resulting imported module """ name = "transformers" location = os.path.join(path, file) spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path]) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) module = sys.modules[name] return module
0
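The import utilities above are combined in practice by pairing `DummyObject` with `requires_backends`: a placeholder class declares which backends it needs, and any attribute access on it re-raises the matching install hint. A minimal sketch follows; `FancySentencePieceTokenizer` is a hypothetical class used only for illustration, not part of the library.

from transformers.utils import DummyObject, requires_backends


class FancySentencePieceTokenizer(metaclass=DummyObject):
    # Hypothetical placeholder: with DummyObject as metaclass, any public attribute
    # access routes through requires_backends and raises the sentencepiece ImportError.
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


# If sentencepiece is missing, both of these raise ImportError with the
# SENTENCEPIECE_IMPORT_ERROR message formatted with the class name:
#   FancySentencePieceTokenizer()
#   FancySentencePieceTokenizer.from_pretrained("some/checkpoint")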
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/quanto.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils import is_torch_available if is_torch_available(): import torch def replace_with_quanto_layers( model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False, ): """ Public method that recursively replaces the Linear layers of the given model with Quanto quantized layers. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`AqlmConfig`, defaults to `None`): The quantization config object that contains the quantization parameters. modules_to_not_convert (`list`, *optional*, defaults to `None`): A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be converted. current_key_name (`list`, *optional*, defaults to `None`): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*, defaults to `None`): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user. """ from accelerate import init_empty_weights from quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8 w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2} a_mapping = {None: None, "float8": qfloat8, "int8": qint8} if modules_to_not_convert is None: modules_to_not_convert = [] for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): with init_empty_weights(): if isinstance(module, torch.nn.Linear): model._modules[name] = QLinear( in_features=module.in_features, out_features=module.out_features, bias=module.bias is not None, dtype=module.weight.dtype, weights=w_mapping[quantization_config.weights], activations=a_mapping[quantization_config.activations], ) model._modules[name].requires_grad_(False) has_been_replaced = True elif isinstance(module, torch.nn.LayerNorm): if quantization_config.activations is not None: model._modules[name] = QLayerNorm( module.normalized_shape, module.eps, module.elementwise_affine, module.bias is not None, activations=a_mapping[quantization_config.activations], ) has_been_replaced = True if len(list(module.children())) > 0: _, has_been_replaced = replace_with_quanto_layers( module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced
0
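A hedged usage sketch for the `replace_with_quanto_layers` helper in the quanto integration above. It assumes the optional `quanto` and `accelerate` packages are installed; in normal use this function is driven by the quanto quantizer during `from_pretrained`, so the direct call below is for illustration only (the swapped layers are created on the meta device and still need their weights materialized).

import torch

from transformers import QuantoConfig
from transformers.integrations.quanto import replace_with_quanto_layers

# Toy module: one Linear (eligible for QLinear) and one LayerNorm (only converted
# when activation quantization is requested, which it is not here).
toy = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.LayerNorm(16))

config = QuantoConfig(weights="int8", activations=None)
toy, replaced = replace_with_quanto_layers(
    toy, quantization_config=config, modules_to_not_convert=["lm_head"]
)
print(replaced)  # True: the Linear layer was swapped for a quanto QLinear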
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/deepspeed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Integration with Deepspeed """ import copy import importlib.metadata as importlib_metadata import importlib.util import weakref from functools import partialmethod from ..dependency_versions_check import dep_version_check from ..utils import is_accelerate_available, is_torch_available, is_torch_mlu_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) def is_deepspeed_available(): package_exists = importlib.util.find_spec("deepspeed") is not None # Check we're not importing a "deepspeed" directory somewhere but the actual library by trying to grab the version # AND checking it has an author field in the metadata that is HuggingFace. if package_exists: try: if is_torch_mlu_available(): _ = importlib_metadata.metadata("deepspeed-mlu") return True _ = importlib_metadata.metadata("deepspeed") return True except importlib_metadata.PackageNotFoundError: return False if is_accelerate_available() and is_deepspeed_available(): from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig else: # Inherits from a dummy `object` if accelerate is not available, so that python succeeds to import this file. # Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available. from builtins import object as DeepSpeedConfig class HfDeepSpeedConfig(DeepSpeedConfig): """ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. A `weakref` of this object is stored in the module's globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore it's important that this object remains alive while the program is still running. [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic the DeepSpeed configuration is not modified in any way. Args: config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. """ def __init__(self, config_file_or_dict): # set global weakref object set_hf_deepspeed_config(self) dep_version_check("accelerate") dep_version_check("deepspeed") super().__init__(config_file_or_dict) class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig): """ The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the same lifespan as the latter. 
""" def __init__(self, config_file_or_dict): super().__init__(config_file_or_dict) self._dtype = None self.mismatches = [] def dtype(self): if self._dtype is None: raise ValueError("trainer_config_process() wasn't called yet to tell dtype") return self._dtype def is_auto(self, ds_key_long): val = self.get_value(ds_key_long) if val is None: return False else: return val == "auto" def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True): """ A utility method that massages the config file and can optionally verify that the values match. 1. Replace "auto" values with `TrainingArguments` value. 2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer config values and if mismatched add the entry to `self.mismatched` - will assert during `trainer_config_finalize` for one or more mismatches. """ config, ds_key = self.find_config_node(ds_key_long) if config is None: return if config.get(ds_key) == "auto": config[ds_key] = hf_val return if not must_match: return ds_val = config.get(ds_key) if ds_val is not None and ds_val != hf_val: self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}") fill_only = partialmethod(fill_match, must_match=False) def trainer_config_process(self, args, auto_find_batch_size=False): """ Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object creation. """ # DeepSpeed does: # train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps self.fill_match( "train_micro_batch_size_per_gpu", args.per_device_train_batch_size, "per_device_train_batch_size", not auto_find_batch_size, ) self.fill_match( "gradient_accumulation_steps", args.gradient_accumulation_steps, "gradient_accumulation_steps", ) self.fill_match( "train_batch_size", train_batch_size, "train_batch_size (calculated)", not auto_find_batch_size, ) self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm") self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate") self.fill_match( "optimizer.params.betas", [args.adam_beta1, args.adam_beta2], "adam_beta1+adam_beta2", ) self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon") self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay") self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate") # total_num_steps - will get set in trainer_config_finalize # fp16 if args.fp16 or args.fp16_full_eval: fp16_backend = "apex" if args.fp16_backend == "apex" else "amp" else: fp16_backend = None if args.save_on_each_node: # deepspeed uses shared storage by default. 
Let's override this setting if save_on_each_node == True self.config["checkpoint"] = self.config.get("checkpoint", {}) self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node # amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set # any here unless the user did the work self.fill_match( "fp16.enabled", ((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"), "fp16|fp16_full_eval+fp16_backend(amp)", ) # apex: delegates amp work to apex (which needs to be available), but it cannot be used with any # ZeRO features self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)") self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level") self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval") # deepspeed's default mode is fp16 unless there is a config that says differently if self.is_true("bf16.enabled"): self._dtype = torch.bfloat16 elif self.is_false("fp16.enabled"): self._dtype = torch.float32 else: self._dtype = torch.float16 def trainer_config_finalize(self, args, model, num_training_steps): """ This stage is run after we have the model and know num_training_steps. Now we can complete the configuration process. """ # zero # deal with config keys that use `auto` value and rely on model's hidden_size hidden_size_based_keys = [ "zero_optimization.reduce_bucket_size", "zero_optimization.stage3_prefetch_bucket_size", "zero_optimization.stage3_param_persistence_threshold", ] hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)] if len(hidden_size_auto_keys) > 0: if hasattr(model.config, "hidden_size"): hidden_size = model.config.hidden_size elif hasattr(model.config, "hidden_sizes"): # if there are many hidden sizes pick the largest one hidden_size = max(model.config.hidden_sizes) else: raise ValueError( "The model's config file has neither `hidden_size` nor `hidden_sizes` entry, " "therefore it's not possible to automatically fill out the following `auto` entries " f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " "`auto` values for these keys with an integer value of your choice." ) self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size) if self.is_zero3(): # automatically assign the optimal config values based on model config self.fill_only( "zero_optimization.stage3_prefetch_bucket_size", 0.9 * hidden_size * hidden_size, ) self.fill_only( "zero_optimization.stage3_param_persistence_threshold", 10 * hidden_size, ) # scheduler self.fill_match( "scheduler.params.total_num_steps", num_training_steps, "num_training_steps (calculated)", ) self.fill_match( "scheduler.params.warmup_num_steps", args.get_warmup_steps(num_training_steps), "warmup_steps", ) if len(self.mismatches) > 0: mismatches = "\n".join(self.mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch TrainingArguments" f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'." ) # keep the config object global to be able to access it anywhere during TrainingArguments life-cycle _hf_deepspeed_config_weak_ref = None def set_hf_deepspeed_config(hf_deepspeed_config_obj): # this is a special weakref global object to allow us to get to Deepspeed config from APIs # that don't have an easy way to get to the Deepspeed config outside of the Trainer domain. 
global _hf_deepspeed_config_weak_ref # will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed) _hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj) def unset_hf_deepspeed_config(): # useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method global _hf_deepspeed_config_weak_ref _hf_deepspeed_config_weak_ref = None def is_deepspeed_zero3_enabled(): if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: return _hf_deepspeed_config_weak_ref().is_zero3() else: return False def deepspeed_config(): if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None: return _hf_deepspeed_config_weak_ref().config else: return None def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters): """ A convenience wrapper that deals with optimizer and lr scheduler configuration. """ from accelerate.utils import DummyOptim, DummyScheduler config = hf_deepspeed_config.config # Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's: # 1. DS scheduler + DS optimizer: Yes # 2. HF scheduler + HF optimizer: Mostly* # 3. DS scheduler + HF optimizer: Mostly* # 4. HF scheduler + DS optimizer: Yes # # Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB) optimizer = None if "optimizer" in config: if args.adafactor: raise ValueError( "--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. " "Only one optimizer can be configured." ) optimizer = DummyOptim(params=model_parameters) else: if hf_deepspeed_config.is_offload(): logger.info( "Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the" " custom optimizer has both CPU and GPU implementation (except LAMB)" ) # ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch. # But trainer uses AdamW by default. optimizer = trainer.create_optimizer() # To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer` config["zero_allow_untested_optimizer"] = True lr_scheduler = None if "scheduler" in config: lr_scheduler = DummyScheduler(optimizer) else: if isinstance(optimizer, DummyOptim): def _lr_scheduler_callable(optimizer): # create a shallow copy first, so later modifications do not affect original trainer trainer_copy = copy.copy(trainer) # at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set # update it to None so that we can re-create a new scheduler trainer_copy.lr_scheduler = None lr_scheduler = trainer_copy.create_scheduler( num_training_steps=num_training_steps, optimizer=optimizer ) return lr_scheduler lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable) else: lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) return optimizer, lr_scheduler def deepspeed_init(trainer, num_training_steps, inference=False): """ Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args. If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made. 
Args: trainer: Trainer object num_training_steps: per single gpu resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load inference: launch in inference mode (no optimizer and no lr scheduler) auto_find_batch_size: whether to ignore the `train_micro_batch_size_per_gpu` argument as it's being set automatically by the auto batch size finder Returns: optimizer, lr_scheduler We may use `deepspeed_init` more than once during the life of Trainer, when we do - it's a temp hack based on: https://github.com/microsoft/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it can't resume from a checkpoint after it did some stepping https://github.com/microsoft/DeepSpeed/issues/1612 """ from deepspeed.utils import logger as ds_logger model = trainer.model args = trainer.args hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config # resume config update - some bits like `model` and `num_training_steps` only become available during train hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps) # set the Deepspeed log level consistent with the Trainer ds_logger.setLevel(args.get_process_log_level()) if inference: # only Z3 makes sense for the inference if not hf_deepspeed_config.is_zero3(): raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config") # in case the training config is re-used for inference hf_deepspeed_config.del_config_sub_tree("optimizer") hf_deepspeed_config.del_config_sub_tree("lr_scheduler") optimizer, lr_scheduler = None, None model_parameters = None else: trainer.optimizer = None # important for when deepspeed_init is used as re-init model_parameters = list(filter(lambda p: p.requires_grad, model.parameters())) optimizer, lr_scheduler = deepspeed_optim_sched( trainer, hf_deepspeed_config, args, num_training_steps, model_parameters ) # keep for quick debug: # from pprint import pprint; pprint(config) return optimizer, lr_scheduler def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True): # it's possible that the user is trying to resume from model_path, which doesn't necessarily # contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's # a resume from a checkpoint and not just a local pretrained weight. So we check here if the # path contains what looks like a deepspeed checkpoint import glob deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*")) if len(deepspeed_checkpoint_dirs) > 0: logger.info(f"Attempting to resume from {checkpoint_path}") # this magically updates self.optimizer and self.lr_scheduler load_path, _ = deepspeed_engine.load_checkpoint( checkpoint_path, load_module_strict=load_module_strict, load_optimizer_states=True, load_lr_scheduler_states=True, ) if load_path is None: raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}") else: raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
0
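The docstrings above describe the weakref-based global config; below is a minimal sketch (assuming `deepspeed` and `accelerate` are installed) of keeping an `HfDeepSpeedConfig` alive so that ZeRO-3 state is visible to `from_pretrained` and related code paths outside the Trainer.

from transformers.integrations.deepspeed import (
    HfDeepSpeedConfig,
    is_deepspeed_zero3_enabled,
    unset_hf_deepspeed_config,
)

ds_config = {
    "zero_optimization": {"stage": 3},
    "train_micro_batch_size_per_gpu": 1,
}

# The object must stay referenced: the module only keeps a weakref to it.
dschf = HfDeepSpeedConfig(ds_config)
assert is_deepspeed_zero3_enabled()

# In unit tests, clear the global weakref from tearDown so state does not leak.
unset_hf_deepspeed_config()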
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import _LazyModule _import_structure = { "aqlm": ["replace_with_aqlm_linear"], "awq": [ "fuse_awq_modules", "post_init_awq_exllama_modules", "replace_with_awq_linear", ], "bitsandbytes": [ "get_keys_to_not_convert", "replace_8bit_linear", "replace_with_bnb_linear", "set_module_8bit_tensor_to_device", "set_module_quantized_tensor_to_device", ], "deepspeed": [ "HfDeepSpeedConfig", "HfTrainerDeepSpeedConfig", "deepspeed_config", "deepspeed_init", "deepspeed_load_checkpoint", "deepspeed_optim_sched", "is_deepspeed_available", "is_deepspeed_zero3_enabled", "set_hf_deepspeed_config", "unset_hf_deepspeed_config", ], "eetq": ["replace_with_eetq_linear"], "hqq": ["prepare_for_hqq_linear"], "integration_utils": [ "INTEGRATION_TO_CALLBACK", "AzureMLCallback", "ClearMLCallback", "CodeCarbonCallback", "CometCallback", "DagsHubCallback", "DVCLiveCallback", "FlyteCallback", "MLflowCallback", "NeptuneCallback", "NeptuneMissingConfiguration", "TensorBoardCallback", "WandbCallback", "get_available_reporting_integrations", "get_reporting_integration_callbacks", "hp_params", "is_azureml_available", "is_clearml_available", "is_codecarbon_available", "is_comet_available", "is_dagshub_available", "is_dvclive_available", "is_flyte_deck_standard_available", "is_flytekit_available", "is_mlflow_available", "is_neptune_available", "is_optuna_available", "is_ray_available", "is_ray_tune_available", "is_sigopt_available", "is_tensorboard_available", "is_wandb_available", "rewrite_logs", "run_hp_search_optuna", "run_hp_search_ray", "run_hp_search_sigopt", "run_hp_search_wandb", ], "peft": ["PeftAdapterMixin"], "quanto": ["replace_with_quanto_layers"], } if TYPE_CHECKING: from .aqlm import replace_with_aqlm_linear from .awq import ( fuse_awq_modules, post_init_awq_exllama_modules, replace_with_awq_linear, ) from .bitsandbytes import ( get_keys_to_not_convert, replace_8bit_linear, replace_with_bnb_linear, set_module_8bit_tensor_to_device, set_module_quantized_tensor_to_device, ) from .deepspeed import ( HfDeepSpeedConfig, HfTrainerDeepSpeedConfig, deepspeed_config, deepspeed_init, deepspeed_load_checkpoint, deepspeed_optim_sched, is_deepspeed_available, is_deepspeed_zero3_enabled, set_hf_deepspeed_config, unset_hf_deepspeed_config, ) from .eetq import replace_with_eetq_linear from .hqq import prepare_for_hqq_linear from .integration_utils import ( INTEGRATION_TO_CALLBACK, AzureMLCallback, ClearMLCallback, CodeCarbonCallback, CometCallback, DagsHubCallback, DVCLiveCallback, FlyteCallback, MLflowCallback, NeptuneCallback, NeptuneMissingConfiguration, TensorBoardCallback, WandbCallback, get_available_reporting_integrations, get_reporting_integration_callbacks, hp_params, is_azureml_available, is_clearml_available, is_codecarbon_available, is_comet_available, is_dagshub_available, is_dvclive_available, is_flyte_deck_standard_available, is_flytekit_available, is_mlflow_available, 
is_neptune_available, is_optuna_available, is_ray_available, is_ray_tune_available, is_sigopt_available, is_tensorboard_available, is_wandb_available, rewrite_logs, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .peft import PeftAdapterMixin from .quanto import replace_with_quanto_layers else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
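Because the `__init__.py` above wires everything through `_LazyModule`, importing `transformers.integrations` is cheap and submodules are only resolved on first attribute access. A small illustration:

import transformers.integrations as integrations

# Nothing heavy has been imported yet; this first attribute access triggers
# _LazyModule.__getattr__, which imports `integration_utils` and caches the result.
print(integrations.is_wandb_available())

# __all__ is prepopulated from _import_structure so IDE autocompletion still works.
print(len(integrations.__all__))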
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/hqq.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "HQQ (Half-Quadratic Quantization) integration file" from ..utils import is_hqq_available, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) # Name all modules inside the model def autoname_modules(model): for name, module in model.named_modules(): module.name = name # Get the linear_tag from a modul name. For example: model.layers.31.self_attn.k_proj -> self_attn.k_proj def name_to_linear_tag(name): return ".".join([n for n in name.split(".") if ((n not in ["model", "layers"]) and (not n.isnumeric()))]) # Get all linear tags available def get_linear_tags(model): if is_hqq_available(): from hqq.core.quantize import HQQLinear linear_tags = set() for name, module in model.named_modules(): if isinstance(module, (torch.nn.Linear, HQQLinear)): linear_tags.add(name_to_linear_tag(name)) return list(linear_tags) def _prepare_for_hqq_linear(model, patch_params, has_been_replaced, current_key_name=None): for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, torch.nn.Linear): # Get linear tag linear_tag = name_to_linear_tag(module.name) # We put the module quant_config into the nn.Linear layer so we can access it later in quantizer_hqq.create_quantized_param() if linear_tag in patch_params: if patch_params[linear_tag] is not None: model._modules[name].quant_config = patch_params[linear_tag] # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) has_been_replaced = True if len(list(module.children())) > 0: _, has_been_replaced = _prepare_for_hqq_linear( module, patch_params=patch_params, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def prepare_for_hqq_linear(model, quantization_config=None, modules_to_not_convert=None, has_been_replaced=False): """ Prepares nn.Linear layers for HQQ quantization. Since each layer type can have separate quantization parameters, we need to do the following: 1- tag each module with its neme via autoname_modules() 2- Extract linear_tags (e.g. ['self_attn.q_proj', ...]) 3- Map quantization parameters as a dictionary linear_tag -> quant_params as HQQLinear exepects it, this is referred to as patch_params """ modules_to_not_convert = [] if modules_to_not_convert is None else modules_to_not_convert # Add name to module autoname_modules(model) # Get linear tags. 
This allows us to use different quant params to different layer types linear_tags = get_linear_tags(model) # Convert quantization_config to layer-wise config skip_modules = quantization_config.skip_modules quant_config = quantization_config.to_dict() linear_tags = list(set(linear_tags) - set(skip_modules) - set(modules_to_not_convert)) if any(key in linear_tags for key in quant_config.keys()): # If the user doesn't specify a key from get_linear_tags, the layer is not quantized via (key, None) patch_params = {key: None for key in linear_tags} patch_params.update(quant_config) else: # Same quant_config for all layers patch_params = {k: quant_config for k in linear_tags} model, has_been_replaced = _prepare_for_hqq_linear( model, patch_params=patch_params, has_been_replaced=has_been_replaced ) # We store quantization config as linear_tag -> hqq quant config model.config.quantization_config = patch_params if not has_been_replaced: logger.warning("No linear modules were found in your model for quantization.") return model
0
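A hedged sketch of calling `prepare_for_hqq_linear` directly, assuming the `hqq` package is installed and that `HqqConfig` exposes `nbits`/`group_size` as in recent releases; the checkpoint name is illustrative, and any model whose projections are plain `nn.Linear` layers would do. In normal use this path runs inside the HQQ quantizer when `from_pretrained(..., quantization_config=HqqConfig(...))` is called.

from transformers import AutoModelForCausalLM, HqqConfig
from transformers.integrations.hqq import prepare_for_hqq_linear

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
quant_config = HqqConfig(nbits=4, group_size=64)

model, replaced = prepare_for_hqq_linear(model, quantization_config=quant_config)
# Every eligible nn.Linear now carries a `quant_config` attribute that
# quantizer_hqq.create_quantized_param() consumes later on.
print(replaced)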
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/peft.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings from typing import Any, Dict, List, Optional, Union from ..utils import ( check_peft_version, find_adapter_config_file, is_accelerate_available, is_peft_available, is_torch_available, logging, ) if is_torch_available(): import torch if is_accelerate_available(): from accelerate import dispatch_model from accelerate.utils import get_balanced_memory, infer_auto_device_map # Minimum PEFT version supported for the integration MIN_PEFT_VERSION = "0.5.0" logger = logging.get_logger(__name__) class PeftAdapterMixin: """ A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT library: https://huggingface.co/docs/peft/index Currently supported PEFT methods are all non-prefix tuning methods. Below is the list of supported PEFT methods that anyone can load, train and run with this mixin class: - Low Rank Adapters (LoRA): https://huggingface.co/docs/peft/conceptual_guides/lora - IA3: https://huggingface.co/docs/peft/conceptual_guides/ia3 - AdaLora: https://arxiv.org/abs/2303.10512 Other PEFT models such as prompt tuning, prompt learning are out of scope as these adapters are not "injectable" into a torch module. For using these methods, please refer to the usage guide of PEFT library. With this mixin, if the correct PEFT version is installed, it is possible to: - Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model - Attach new adapters in the model and train them with Trainer or by your own. - Attach multiple adapters and iteratively activate / deactivate them - Activate / deactivate all adapters from the model. - Get the `state_dict` of the active adapter. """ _hf_peft_config_loaded = False def load_adapter( self, peft_model_id: Optional[str] = None, adapter_name: Optional[str] = None, revision: Optional[str] = None, token: Optional[str] = None, device_map: Optional[str] = "auto", max_memory: Optional[str] = None, offload_folder: Optional[str] = None, offload_index: Optional[int] = None, peft_config: Dict[str, Any] = None, adapter_state_dict: Optional[Dict[str, "torch.Tensor"]] = None, adapter_kwargs: Optional[Dict[str, Any]] = None, ) -> None: """ Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft Requires peft as a backend to load the adapter weights. Args: peft_model_id (`str`, *optional*): The identifier of the model to look for on the Hub, or a local path to the saved adapter config file and adapter weights. adapter_name (`str`, *optional*): The adapter name to use. If not set, will use the default adapter. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> token (`str`, `optional`): Whether to use authentication token to load the remote folder. Userful to load private repositories that are on HuggingFace Hub. You might need to call `huggingface-cli login` and paste your tokens to cache it. device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, `optional`): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, `optional`): `offload_index` argument to be passed to `accelerate.dispatch_model` method. peft_config (`Dict[str, Any]`, *optional*): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods. This argument is used in case users directly pass PEFT state dicts adapter_state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the adapter to load. This argument is used in case users directly pass PEFT state dicts adapter_kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and `find_adapter_config_file` method. """ check_peft_version(min_version=MIN_PEFT_VERSION) adapter_name = adapter_name if adapter_name is not None else "default" if adapter_kwargs is None: adapter_kwargs = {} from peft import PeftConfig, inject_adapter_in_model, load_peft_weights from peft.utils import set_peft_model_state_dict if self._hf_peft_config_loaded and adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if peft_model_id is None and (adapter_state_dict is None and peft_config is None): raise ValueError( "You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter." ) if "device" not in adapter_kwargs: device = self.device if not hasattr(self, "hf_device_map") else list(self.hf_device_map.values())[0] else: device = adapter_kwargs.pop("device") # To avoid PEFT errors later on with safetensors. 
if isinstance(device, torch.device): device = str(device) # We keep `revision` in the signature for backward compatibility if revision is not None and "revision" not in adapter_kwargs: adapter_kwargs["revision"] = revision elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]: logger.error( "You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. " "The one in `adapter_kwargs` will be used." ) # Override token with adapter_kwargs' token if "token" in adapter_kwargs: token = adapter_kwargs.pop("token") if peft_config is None: adapter_config_file = find_adapter_config_file( peft_model_id, token=token, **adapter_kwargs, ) if adapter_config_file is None: raise ValueError( f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the " "adapter model." ) peft_config = PeftConfig.from_pretrained( peft_model_id, token=token, **adapter_kwargs, ) # Create and add fresh new adapters into the model. inject_adapter_in_model(peft_config, self, adapter_name) if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True if peft_model_id is not None: adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs) # We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility processed_adapter_state_dict = {} prefix = "base_model.model." for key, value in adapter_state_dict.items(): if key.startswith(prefix): new_key = key[len(prefix) :] else: new_key = key processed_adapter_state_dict[new_key] = value # Load state dict incompatible_keys = set_peft_model_state_dict(self, processed_adapter_state_dict, adapter_name) if incompatible_keys is not None: # check only for unexpected keys if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0: logger.warning( f"Loading adapter weights from {peft_model_id} led to unexpected keys not found in the model: " f" {incompatible_keys.unexpected_keys}. " ) # Re-dispatch model and hooks in case the model is offloaded to CPU / Disk. if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): self._dispatch_accelerate_model( device_map=device_map, max_memory=max_memory, offload_folder=offload_folder, offload_index=offload_index, ) def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> None: r""" If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the default adapter name). Args: adapter_config (`~peft.PeftConfig`): The configuration of the adapter to add, supported adapters are non-prefix tuning and adaption prompts methods adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) from peft import PeftConfig, inject_adapter_in_model adapter_name = adapter_name or "default" if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. 
Please use a different name.") if not isinstance(adapter_config, PeftConfig): raise ValueError( f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." ) # Retrieve the name or path of the model, one could also use self.config._name_or_path # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100 adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None) inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[List[str], str]) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Sets a specific adapter by forcing the model to use a that adapter and disable the other adapters. Args: adapter_name (`Union[List[str], str]`): The name of the adapter to set. Can be also a list of strings to set multiple adapters. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") elif isinstance(adapter_name, list): missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError( f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." f" current loaded adapters are: {list(self.peft_config.keys())}" ) elif adapter_name not in self.peft_config: raise ValueError( f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}" ) from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper _adapters_has_been_set = False for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): # For backward compatbility with previous PEFT versions if hasattr(module, "set_adapter"): module.set_adapter(adapter_name) else: module.active_adapter = adapter_name _adapters_has_been_set = True if not _adapters_has_been_set: raise ValueError( "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters." ) def disable_adapters(self) -> None: r""" If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Disable all adapters that are attached to the model. This leads to inferring with the base model only. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ModulesToSaveWrapper for _, module in self.named_modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): # The recent version of PEFT need to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=False) else: module.disable_adapters = True def enable_adapters(self) -> None: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Enable adapters that are attached to the model. 
The model will use `self.active_adapter()` """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): # The recent version of PEFT need to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=True) else: module.disable_adapters = False def active_adapters(self) -> List[str]: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters for inference) returns the list of all active adapters so that users can deal with them accordingly. For previous PEFT versions (that does not support multi-adapter inference), `module.active_adapter` will return a single string. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapter break # For previous PEFT versions if isinstance(active_adapters, str): active_adapters = [active_adapters] return active_adapters def active_adapter(self) -> str: warnings.warn( "The `active_adapter` method is deprecated and will be removed in a future version.", FutureWarning ) return self.active_adapters()[0] def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict: """ If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter. If no adapter_name is passed, the active adapter is used. Args: adapter_name (`str`, *optional*): The name of the adapter to get the state dict from. If no name is passed, the active adapter is used. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft import get_peft_model_state_dict if adapter_name is None: adapter_name = self.active_adapter() adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name) return adapter_state_dict def _dispatch_accelerate_model( self, device_map: str, max_memory: Optional[int] = None, offload_folder: Optional[str] = None, offload_index: Optional[int] = None, ) -> None: """ Optional re-dispatch the model and attach new hooks to the model in case the model has been loaded with accelerate (i.e. with `device_map=xxx`) Args: device_map (`str` or `Dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. 
If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which the model will be allocated, the device map will map the entire model to this device. Passing `device_map = 0` means put the whole model on GPU 0. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_index (`int`, *optional*): The offload_index argument to be passed to `accelerate.dispatch_model` method. """ dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_folder, **dispatch_model_kwargs, )
0
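A short usage sketch of the `PeftAdapterMixin` surface documented above: attaching a fresh LoRA adapter to a pretrained model and toggling it. It assumes `peft>=0.5.0` is installed; the checkpoint and `target_modules` below are illustrative (they match OPT-style attention projections).

from peft import LoraConfig

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model.add_adapter(lora_config, adapter_name="my_lora")  # also sets it as the active adapter

print(model.active_adapters())        # ["my_lora"]
model.disable_adapters()              # run inference with the base weights only
model.enable_adapters()
state = model.get_adapter_state_dict("my_lora")  # only the LoRA tensors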
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/integration_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Integrations with other Python libraries. """ import functools import importlib.metadata import importlib.util import json import numbers import os import pickle import shutil import sys import tempfile from dataclasses import asdict, fields from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Union import numpy as np import packaging.version from .. import PreTrainedModel, TFPreTrainedModel from .. import __version__ as version from ..utils import ( PushToHubMixin, flatten_dict, is_datasets_available, is_pandas_available, is_tf_available, is_torch_available, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch # comet_ml requires to be imported before any ML frameworks _has_comet = importlib.util.find_spec("comet_ml") is not None and os.getenv("COMET_MODE", "").upper() != "DISABLED" if _has_comet: try: import comet_ml # noqa: F401 if hasattr(comet_ml, "config") and comet_ml.config.get_config("comet.api_key"): _has_comet = True else: if os.getenv("COMET_MODE", "").upper() != "DISABLED": logger.warning("comet_ml is installed but `COMET_API_KEY` is not set.") _has_comet = False except (ImportError, ValueError): _has_comet = False _has_neptune = ( importlib.util.find_spec("neptune") is not None or importlib.util.find_spec("neptune-client") is not None ) if TYPE_CHECKING and _has_neptune: try: _neptune_version = importlib.metadata.version("neptune") logger.info(f"Neptune version {_neptune_version} available.") except importlib.metadata.PackageNotFoundError: try: _neptune_version = importlib.metadata.version("neptune-client") logger.info(f"Neptune-client version {_neptune_version} available.") except importlib.metadata.PackageNotFoundError: _has_neptune = False from .. import modelcard # noqa: E402 from ..trainer_callback import ProgressCallback, TrainerCallback # noqa: E402 from ..trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402 from ..training_args import ParallelMode # noqa: E402 from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available # noqa: E402 # Integration functions: def is_wandb_available(): # any value of WANDB_DISABLED disables wandb if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES: logger.warning( "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the " "--report_to flag to control the integrations used for logging result (for instance --report_to none)." 
) return False return importlib.util.find_spec("wandb") is not None def is_clearml_available(): return importlib.util.find_spec("clearml") is not None def is_comet_available(): return _has_comet def is_tensorboard_available(): return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None def is_optuna_available(): return importlib.util.find_spec("optuna") is not None def is_ray_available(): return importlib.util.find_spec("ray") is not None def is_ray_tune_available(): if not is_ray_available(): return False return importlib.util.find_spec("ray.tune") is not None def is_sigopt_available(): return importlib.util.find_spec("sigopt") is not None def is_azureml_available(): if importlib.util.find_spec("azureml") is None: return False if importlib.util.find_spec("azureml.core") is None: return False return importlib.util.find_spec("azureml.core.run") is not None def is_mlflow_available(): if os.getenv("DISABLE_MLFLOW_INTEGRATION", "FALSE").upper() == "TRUE": return False return importlib.util.find_spec("mlflow") is not None def is_dagshub_available(): return None not in [importlib.util.find_spec("dagshub"), importlib.util.find_spec("mlflow")] def is_neptune_available(): return _has_neptune def is_codecarbon_available(): return importlib.util.find_spec("codecarbon") is not None def is_flytekit_available(): return importlib.util.find_spec("flytekit") is not None def is_flyte_deck_standard_available(): if not is_flytekit_available(): return False return importlib.util.find_spec("flytekitplugins.deck") is not None def is_dvclive_available(): return importlib.util.find_spec("dvclive") is not None def hp_params(trial): if is_optuna_available(): import optuna if isinstance(trial, optuna.Trial): return trial.params if is_ray_tune_available(): if isinstance(trial, dict): return trial if is_sigopt_available(): if isinstance(trial, dict): return trial if is_wandb_available(): if isinstance(trial, dict): return trial raise RuntimeError(f"Unknown type for trial {trial.__class__}") def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import optuna if trainer.args.process_index == 0: def _objective(trial, checkpoint_dir=None): checkpoint = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): if subdir.startswith(PREFIX_CHECKPOINT_DIR): checkpoint = os.path.join(checkpoint_dir, subdir) trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(trial) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=checkpoint) else: trainer.train(resume_from_checkpoint=checkpoint, trial=trial) # If there hasn't been any evaluation during the training loop. 
if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return trainer.objective timeout = kwargs.pop("timeout", None) n_jobs = kwargs.pop("n_jobs", 1) directions = direction if isinstance(direction, list) else None direction = None if directions is not None else direction study = optuna.create_study(direction=direction, directions=directions, **kwargs) study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) if not study._is_multi_objective(): best_trial = study.best_trial return BestRun(str(best_trial.number), best_trial.value, best_trial.params) else: best_trials = study.best_trials return [BestRun(str(best.number), best.values, best.params) for best in best_trials] else: for i in range(n_trials): trainer.objective = None args_main_rank = list(pickle.dumps(trainer.args)) if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) args = pickle.loads(bytes(args_main_rank)) for key, value in asdict(args).items(): if key != "local_rank": setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return None def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import ray import ray.train def _objective(trial: dict, local_trainer): try: from transformers.utils.notebook import NotebookProgressCallback if local_trainer.pop_callback(NotebookProgressCallback): local_trainer.add_callback(ProgressCallback) except ModuleNotFoundError: pass local_trainer.objective = None checkpoint = ray.train.get_checkpoint() if checkpoint: # Upon trial resume, the local_trainer's objective gets reset to None. # If `local_trainer.train` is a noop (training has already reached # the target number of epochs/steps), then this would # trigger an unnecessary extra checkpoint at the end of training. # -> Set the objective to a dummy value upon resume as a workaround. local_trainer.objective = "objective" with checkpoint.as_directory() as checkpoint_dir: checkpoint_path = next(Path(checkpoint_dir).glob(f"{PREFIX_CHECKPOINT_DIR}*")).as_posix() local_trainer.train(resume_from_checkpoint=checkpoint_path, trial=trial) else: local_trainer.train(trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(local_trainer, "objective", None) is None: metrics = local_trainer.evaluate() local_trainer.objective = local_trainer.compute_objective(metrics) metrics.update({"objective": local_trainer.objective, "done": True}) with tempfile.TemporaryDirectory() as temp_checkpoint_dir: local_trainer._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) ray.train.report(metrics, checkpoint=checkpoint) if not trainer._memory_tracker.skip_memory_metrics: from ..trainer_utils import TrainerMemoryTracker logger.warning( "Memory tracking for your Trainer is currently " "enabled. Automatically disabling the memory tracker " "since the memory tracker is not serializable." 
) trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True) # The model and TensorBoard writer do not pickle so we have to remove them (if they exists) # while doing the ray hp search. _tb_writer = trainer.pop_callback(TensorBoardCallback) trainer.model = None # Setup default `resources_per_trial`. if "resources_per_trial" not in kwargs: # Default to 1 CPU and 1 GPU (if applicable) per trial. kwargs["resources_per_trial"] = {"cpu": 1} if trainer.args.n_gpu > 0: kwargs["resources_per_trial"]["gpu"] = 1 resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "") logger.info( "No `resources_per_trial` arg was passed into " "`hyperparameter_search`. Setting it to a default value " f"of {resource_msg} for each trial." ) # Make sure each trainer only uses GPUs that were allocated per trial. gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0) trainer.args._n_gpu = gpus_per_trial # Setup default `progress_reporter`. if "progress_reporter" not in kwargs: from ray.tune import CLIReporter kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"]) if "scheduler" in kwargs: from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting. if isinstance( kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining) ) and (not trainer.args.do_eval or trainer.args.eval_strategy == IntervalStrategy.NO): raise RuntimeError( "You are using {cls} as a scheduler but you haven't enabled evaluation during training. " "This means your trials will not report intermediate results to Ray Tune, and " "can thus not be stopped early or used to exploit other trials parameters. " "If this is what you want, do not use {cls}. If you would like to use {cls}, " "make sure you pass `do_eval=True` and `eval_strategy='steps'` in the " "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__) ) trainable = ray.tune.with_parameters(_objective, local_trainer=trainer) @functools.wraps(trainable) def dynamic_modules_import_trainable(*args, **kwargs): """ Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor. Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565. Assumes that `_objective`, defined above, is a function. 
""" if is_datasets_available(): import datasets.load dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py") # load dynamic_modules from path spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path) datasets_modules = importlib.util.module_from_spec(spec) sys.modules[spec.name] = datasets_modules spec.loader.exec_module(datasets_modules) return trainable(*args, **kwargs) # special attr set by tune.with_parameters if hasattr(trainable, "__mixins__"): dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__ analysis = ray.tune.run( dynamic_modules_import_trainable, config=trainer.hp_space(None), num_samples=n_trials, **kwargs, ) best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3], scope=trainer.args.ray_scope) best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config, analysis) if _tb_writer is not None: trainer.add_callback(_tb_writer) return best_run def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import sigopt if trainer.args.process_index == 0: if importlib.metadata.version("sigopt") >= "8.0.0": sigopt.set_project("huggingface") experiment = sigopt.create_experiment( name="huggingface-tune", type="offline", parameters=trainer.hp_space(None), metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], parallel_bandwidth=1, budget=n_trials, ) logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") for run in experiment.loop(): with run: trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(run.run) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=None) else: trainer.train(resume_from_checkpoint=None, trial=run.run) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) run.log_metric("objective", trainer.objective) best = list(experiment.get_best_runs())[0] best_run = BestRun(best.id, best.values["objective"].value, best.assignments) else: from sigopt import Connection conn = Connection() proxies = kwargs.pop("proxies", None) if proxies is not None: conn.set_proxies(proxies) experiment = conn.experiments().create( name="huggingface-tune", parameters=trainer.hp_space(None), metrics=[{"name": "objective", "objective": direction, "strategy": "optimize"}], parallel_bandwidth=1, observation_budget=n_trials, project="huggingface", ) logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") while experiment.progress.observation_count < experiment.observation_budget: suggestion = conn.experiments(experiment.id).suggestions().create() trainer.objective = None if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") trainer._hp_search_setup(suggestion) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) trainer.train(resume_from_checkpoint=None) else: trainer.train(resume_from_checkpoint=None, trial=suggestion) # If there hasn't been any evaluation during the training loop. 
if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) values = [{"name": "objective", "value": trainer.objective}] obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") experiment = conn.experiments(experiment.id).fetch() best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] best_run = BestRun(best.id, best.value, best.assignments) return best_run else: for i in range(n_trials): trainer.objective = None args_main_rank = list(pickle.dumps(trainer.args)) if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) args = pickle.loads(bytes(args_main_rank)) for key, value in asdict(args).items(): if key != "local_rank": setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return None def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: from ..integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") import wandb # add WandbCallback if not already added in trainer callbacks reporting_to_wandb = False for callback in trainer.callback_handler.callbacks: if isinstance(callback, WandbCallback): reporting_to_wandb = True break if not reporting_to_wandb: trainer.add_callback(WandbCallback()) trainer.args.report_to = ["wandb"] best_trial = {"run_id": None, "objective": None, "hyperparameters": None} sweep_id = kwargs.pop("sweep_id", None) project = kwargs.pop("project", None) name = kwargs.pop("name", None) entity = kwargs.pop("entity", None) metric = kwargs.pop("metric", "eval/loss") sweep_config = trainer.hp_space(None) sweep_config["metric"]["goal"] = direction sweep_config["metric"]["name"] = metric if name: sweep_config["name"] = name def _objective(): run = wandb.run if wandb.run else wandb.init() trainer.state.trial_name = run.name run.config.update({"assignments": {}, "metric": metric}) config = wandb.config trainer.objective = None trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) format_metrics = rewrite_logs(metrics) if metric not in format_metrics: logger.warning( f"Provided metric {metric} not found. This might result in unexpected sweeps charts. 
The available" f" metrics are {format_metrics.keys()}" ) best_score = False if best_trial["run_id"] is not None: if direction == "minimize": best_score = trainer.objective < best_trial["objective"] elif direction == "maximize": best_score = trainer.objective > best_trial["objective"] if best_score or best_trial["run_id"] is None: best_trial["run_id"] = run.id best_trial["objective"] = trainer.objective best_trial["hyperparameters"] = dict(config) return trainer.objective sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id logger.info(f"wandb sweep id - {sweep_id}") wandb.agent(sweep_id, function=_objective, count=n_trials) return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"]) def get_available_reporting_integrations(): integrations = [] if is_azureml_available() and not is_mlflow_available(): integrations.append("azure_ml") if is_comet_available(): integrations.append("comet_ml") if is_dagshub_available(): integrations.append("dagshub") if is_dvclive_available(): integrations.append("dvclive") if is_mlflow_available(): integrations.append("mlflow") if is_neptune_available(): integrations.append("neptune") if is_tensorboard_available(): integrations.append("tensorboard") if is_wandb_available(): integrations.append("wandb") if is_codecarbon_available(): integrations.append("codecarbon") if is_clearml_available(): integrations.append("clearml") return integrations def rewrite_logs(d): new_d = {} eval_prefix = "eval_" eval_prefix_len = len(eval_prefix) test_prefix = "test_" test_prefix_len = len(test_prefix) for k, v in d.items(): if k.startswith(eval_prefix): new_d["eval/" + k[eval_prefix_len:]] = v elif k.startswith(test_prefix): new_d["test/" + k[test_prefix_len:]] = v else: new_d["train/" + k] = v return new_d class TensorBoardCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard). Args: tb_writer (`SummaryWriter`, *optional*): The writer to use. Will instantiate one if not set. """ def __init__(self, tb_writer=None): has_tensorboard = is_tensorboard_available() if not has_tensorboard: raise RuntimeError( "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or" " install tensorboardX." 
) if has_tensorboard: try: from torch.utils.tensorboard import SummaryWriter # noqa: F401 self._SummaryWriter = SummaryWriter except ImportError: try: from tensorboardX import SummaryWriter self._SummaryWriter = SummaryWriter except ImportError: self._SummaryWriter = None else: self._SummaryWriter = None self.tb_writer = tb_writer def _init_summary_writer(self, args, log_dir=None): log_dir = log_dir or args.logging_dir if self._SummaryWriter is not None: self.tb_writer = self._SummaryWriter(log_dir=log_dir) def on_train_begin(self, args, state, control, **kwargs): if not state.is_world_process_zero: return log_dir = None if state.is_hyper_param_search: trial_name = state.trial_name if trial_name is not None: log_dir = os.path.join(args.logging_dir, trial_name) if self.tb_writer is None: self._init_summary_writer(args, log_dir) if self.tb_writer is not None: self.tb_writer.add_text("args", args.to_json_string()) if "model" in kwargs: model = kwargs["model"] if hasattr(model, "config") and model.config is not None: model_config_json = model.config.to_json_string() self.tb_writer.add_text("model_config", model_config_json) def on_log(self, args, state, control, logs=None, **kwargs): if not state.is_world_process_zero: return if self.tb_writer is None: self._init_summary_writer(args) if self.tb_writer is not None: logs = rewrite_logs(logs) for k, v in logs.items(): if isinstance(v, (int, float)): self.tb_writer.add_scalar(k, v, state.global_step) else: logger.warning( "Trainer is attempting to log a value of " f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' "This invocation of Tensorboard's writer.add_scalar() " "is incorrect so we dropped this attribute." ) self.tb_writer.flush() def on_train_end(self, args, state, control, **kwargs): if self.tb_writer: self.tb_writer.close() self.tb_writer = None def save_model_architecture_to_file(model: Any, output_dir: str): with open(f"{output_dir}/model_architecture.txt", "w+") as f: if isinstance(model, PreTrainedModel): print(model, file=f) elif is_tf_available() and isinstance(model, TFPreTrainedModel): def print_to_file(s): print(s, file=f) model.summary(print_fn=print_to_file) elif is_torch_available() and ( isinstance(model, (torch.nn.Module, PushToHubMixin)) and hasattr(model, "base_model") ): print(model, file=f) class WandbCallback(TrainerCallback): """ A [`TrainerCallback`] that logs metrics, media, model checkpoints to [Weight and Biases](https://www.wandb.com/). """ def __init__(self): has_wandb = is_wandb_available() if not has_wandb: raise RuntimeError("WandbCallback requires wandb to be installed. Run `pip install wandb`.") if has_wandb: import wandb self._wandb = wandb self._initialized = False # log model if os.getenv("WANDB_LOG_MODEL", "FALSE").upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}): DeprecationWarning( f"Setting `WANDB_LOG_MODEL` as {os.getenv('WANDB_LOG_MODEL')} is deprecated and will be removed in " "version 5 of transformers. Use one of `'end'` or `'checkpoint'` instead." ) logger.info(f"Setting `WANDB_LOG_MODEL` from {os.getenv('WANDB_LOG_MODEL')} to `end` instead") self._log_model = "end" else: self._log_model = os.getenv("WANDB_LOG_MODEL", "false").lower() def setup(self, args, state, model, **kwargs): """ Setup the optional Weights & Biases (*wandb*) integration. One can subclass and override this method to customize the setup if needed. Find more information [here](https://docs.wandb.ai/guides/integrations/huggingface). 
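A minimal way to enable the integration from user code is sketched below (the project and run
names are placeholders); the environment variables that follow offer further control:

```python
import os

from transformers import TrainingArguments

os.environ["WANDB_PROJECT"] = "my-project"  # optional, defaults to "huggingface"
args = TrainingArguments(output_dir="out", run_name="my-run", report_to=["wandb"])
```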
You can also override the following environment variables: Environment: - **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`): Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set to `"end"`, the model will be uploaded at the end of training. If set to `"checkpoint"`, the checkpoint will be uploaded every `args.save_steps` . If set to `"false"`, the model will not be uploaded. Use along with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload best model. <Deprecated version="5.0"> Setting `WANDB_LOG_MODEL` as `bool` will be deprecated in version 5 of 🤗 Transformers. </Deprecated> - **WANDB_WATCH** (`str`, *optional* defaults to `"false"`): Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and parameters. - **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`): Set this to a custom string to store results in a different project. - **WANDB_DISABLED** (`bool`, *optional*, defaults to `False`): Whether to disable wandb entirely. Set `WANDB_DISABLED=true` to disable. """ if self._wandb is None: return self._initialized = True if state.is_world_process_zero: logger.info( 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' ) combined_dict = {**args.to_dict()} if hasattr(model, "config") and model.config is not None: model_config = model.config.to_dict() combined_dict = {**model_config, **combined_dict} if hasattr(model, "peft_config") and model.peft_config is not None: peft_config = model.peft_config combined_dict = {**{"peft_config": peft_config}, **combined_dict} trial_name = state.trial_name init_args = {} if trial_name is not None: init_args["name"] = trial_name init_args["group"] = args.run_name elif args.run_name is not None: init_args["name"] = args.run_name if args.run_name == args.output_dir: self._wandb.termwarn( "The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. 
If this was " "not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.", repeat=False, ) if self._wandb.run is None: self._wandb.init( project=os.getenv("WANDB_PROJECT", "huggingface"), **init_args, ) # add config parameters (run may have been created manually) self._wandb.config.update(combined_dict, allow_val_change=True) # define default x-axis (for latest wandb versions) if getattr(self._wandb, "define_metric", None): self._wandb.define_metric("train/global_step") self._wandb.define_metric("*", step_metric="train/global_step", step_sync=True) # keep track of model topology and gradients, unsupported on TPU _watch_model = os.getenv("WANDB_WATCH", "false") if not is_torch_xla_available() and _watch_model in ("all", "parameters", "gradients"): self._wandb.watch(model, log=_watch_model, log_freq=max(100, state.logging_steps)) self._wandb.run._label(code="transformers_trainer") # add number of model parameters to wandb config try: self._wandb.config["model/num_parameters"] = model.num_parameters() except AttributeError: logger.info("Could not log the number of model parameters in Weights & Biases.") # log the initial model and architecture to an artifact with tempfile.TemporaryDirectory() as temp_dir: model_name = ( f"model-{self._wandb.run.id}" if (args.run_name is None or args.run_name == args.output_dir) else f"model-{self._wandb.run.name}" ) model_artifact = self._wandb.Artifact( name=model_name, type="model", metadata={ "model_config": model.config.to_dict() if hasattr(model, "config") else None, "num_parameters": self._wandb.config.get("model/num_parameters"), "initial_model": True, }, ) model.save_pretrained(temp_dir) # add the architecture to a separate text file save_model_architecture_to_file(model, temp_dir) for f in Path(temp_dir).glob("*"): if f.is_file(): with model_artifact.new_file(f.name, mode="wb") as fa: fa.write(f.read_bytes()) self._wandb.run.log_artifact(model_artifact, aliases=["base_model"]) badge_markdown = ( f'[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge' f'-28.svg" alt="Visualize in Weights & Biases" width="20' f'0" height="32"/>]({self._wandb.run.get_url()})' ) modelcard.AUTOGENERATED_TRAINER_COMMENT += f"\n{badge_markdown}" def on_train_begin(self, args, state, control, model=None, **kwargs): if self._wandb is None: return hp_search = state.is_hyper_param_search if hp_search: self._wandb.finish() self._initialized = False args.run_name = None if not self._initialized: self.setup(args, state, model, **kwargs) def on_train_end(self, args, state, control, model=None, tokenizer=None, **kwargs): if self._wandb is None: return if self._log_model in ("end", "checkpoint") and self._initialized and state.is_world_process_zero: from ..trainer import Trainer fake_trainer = Trainer(args=args, model=model, tokenizer=tokenizer) with tempfile.TemporaryDirectory() as temp_dir: fake_trainer.save_model(temp_dir) metadata = ( { k: v for k, v in dict(self._wandb.summary).items() if isinstance(v, numbers.Number) and not k.startswith("_") } if not args.load_best_model_at_end else { f"eval/{args.metric_for_best_model}": state.best_metric, "train/total_floss": state.total_flos, "model/num_parameters": self._wandb.config.get("model/num_parameters"), } ) metadata["final_model"] = True logger.info("Logging model artifacts. 
...") model_name = ( f"model-{self._wandb.run.id}" if (args.run_name is None or args.run_name == args.output_dir) else f"model-{self._wandb.run.name}" ) # add the model architecture to a separate text file save_model_architecture_to_file(model, temp_dir) artifact = self._wandb.Artifact(name=model_name, type="model", metadata=metadata) for f in Path(temp_dir).glob("*"): if f.is_file(): with artifact.new_file(f.name, mode="wb") as fa: fa.write(f.read_bytes()) self._wandb.run.log_artifact(artifact, aliases=["final_model"]) def on_log(self, args, state, control, model=None, logs=None, **kwargs): single_value_scalars = [ "train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss", "total_flos", ] if self._wandb is None: return if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: for k, v in logs.items(): if k in single_value_scalars: self._wandb.run.summary[k] = v non_scalar_logs = {k: v for k, v in logs.items() if k not in single_value_scalars} non_scalar_logs = rewrite_logs(non_scalar_logs) self._wandb.log({**non_scalar_logs, "train/global_step": state.global_step}) def on_save(self, args, state, control, **kwargs): if self._log_model == "checkpoint" and self._initialized and state.is_world_process_zero: checkpoint_metadata = { k: v for k, v in dict(self._wandb.summary).items() if isinstance(v, numbers.Number) and not k.startswith("_") } checkpoint_metadata["model/num_parameters"] = self._wandb.config.get("model/num_parameters") ckpt_dir = f"checkpoint-{state.global_step}" artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. ...") checkpoint_name = ( f"model-{self._wandb.run.id}" if (args.run_name is None or args.run_name == args.output_dir) else f"model-{self._wandb.run.name}" ) artifact = self._wandb.Artifact(name=checkpoint_name, type="model", metadata=checkpoint_metadata) artifact.add_dir(artifact_path) self._wandb.log_artifact( artifact, aliases=[f"epoch_{round(state.epoch, 2)}", f"checkpoint_global_step_{state.global_step}"] ) def on_predict(self, args, state, control, metrics, **kwargs): if self._wandb is None: return if not self._initialized: self.setup(args, state, **kwargs) if state.is_world_process_zero: metrics = rewrite_logs(metrics) self._wandb.log(metrics) class CometCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [Comet ML](https://www.comet.ml/site/). """ def __init__(self): if not _has_comet: raise RuntimeError("CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.") self._initialized = False self._log_assets = False def setup(self, args, state, model): """ Setup the optional Comet.ml integration. Environment: - **COMET_MODE** (`str`, *optional*, defaults to `ONLINE`): Whether to create an online, offline experiment or disable Comet logging. Can be `OFFLINE`, `ONLINE`, or `DISABLED`. - **COMET_PROJECT_NAME** (`str`, *optional*): Comet project name for experiments. - **COMET_OFFLINE_DIRECTORY** (`str`, *optional*): Folder to use for saving offline experiments when `COMET_MODE` is `OFFLINE`. - **COMET_LOG_ASSETS** (`str`, *optional*, defaults to `TRUE`): Whether or not to log training assets (tf event logs, checkpoints, etc), to Comet. Can be `TRUE`, or `FALSE`. For a number of configurable items in the environment, see [here](https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables). 
""" self._initialized = True log_assets = os.getenv("COMET_LOG_ASSETS", "FALSE").upper() if log_assets in {"TRUE", "1"}: self._log_assets = True if state.is_world_process_zero: comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() experiment = None experiment_kwargs = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} if comet_mode == "ONLINE": experiment = comet_ml.Experiment(**experiment_kwargs) experiment.log_other("Created from", "transformers") logger.info("Automatic Comet.ml online logging enabled") elif comet_mode == "OFFLINE": experiment_kwargs["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") experiment = comet_ml.OfflineExperiment(**experiment_kwargs) experiment.log_other("Created from", "transformers") logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") if experiment is not None: experiment._set_model_graph(model, framework="transformers") experiment._log_parameters(args, prefix="args/", framework="transformers") if hasattr(model, "config"): experiment._log_parameters(model.config, prefix="config/", framework="transformers") def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: experiment = comet_ml.config.get_global_experiment() if experiment is not None: experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework="transformers") def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: experiment = comet_ml.config.get_global_experiment() if experiment is not None: if self._log_assets is True: logger.info("Logging checkpoints. This may take time.") experiment.log_asset_folder( args.output_dir, recursive=True, log_file_name=True, step=state.global_step ) experiment.end() class AzureMLCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/). """ def __init__(self, azureml_run=None): if not is_azureml_available(): raise RuntimeError("AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.") self.azureml_run = azureml_run def on_init_end(self, args, state, control, **kwargs): from azureml.core.run import Run if self.azureml_run is None and state.is_world_process_zero: self.azureml_run = Run.get_context() def on_log(self, args, state, control, logs=None, **kwargs): if self.azureml_run and state.is_world_process_zero: for k, v in logs.items(): if isinstance(v, (int, float)): self.azureml_run.log(k, v, description=k) class MLflowCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [MLflow](https://www.mlflow.org/). Can be disabled by setting environment variable `DISABLE_MLFLOW_INTEGRATION = TRUE`. """ def __init__(self): if not is_mlflow_available(): raise RuntimeError("MLflowCallback requires mlflow to be installed. Run `pip install mlflow`.") import mlflow self._MAX_PARAM_VAL_LENGTH = mlflow.utils.validation.MAX_PARAM_VAL_LENGTH self._MAX_PARAMS_TAGS_PER_BATCH = mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH self._initialized = False self._auto_end_run = False self._log_artifacts = False self._ml_flow = mlflow def setup(self, args, state, model): """ Setup the optional MLflow integration. 
Environment: - **HF_MLFLOW_LOG_ARTIFACTS** (`str`, *optional*): Whether to use MLflow `.log_artifact()` facility to log artifacts. This only makes sense if logging to a remote server, e.g. s3 or GCS. If set to `True` or *1*, will copy each saved checkpoint on each save in [`TrainingArguments`]'s `output_dir` to the local or remote artifact storage. Using it without a remote storage will just copy the files to your artifact location. - **MLFLOW_TRACKING_URI** (`str`, *optional*): Whether to store runs at a specific path or remote server. Unset by default, which skips setting the tracking URI entirely. - **MLFLOW_EXPERIMENT_NAME** (`str`, *optional*, defaults to `None`): Whether to use an MLflow experiment_name under which to launch the run. Default to `None` which will point to the `Default` experiment in MLflow. Otherwise, it is a case sensitive name of the experiment to be activated. If an experiment with this name does not exist, a new experiment with this name is created. - **MLFLOW_TAGS** (`str`, *optional*): A string dump of a dictionary of key/value pair to be added to the MLflow run as tags. Example: `os.environ['MLFLOW_TAGS']='{"release.candidate": "RC1", "release.version": "2.2.0"}'`. - **MLFLOW_NESTED_RUN** (`str`, *optional*): Whether to use MLflow nested runs. If set to `True` or *1*, will create a nested run inside the current run. - **MLFLOW_RUN_ID** (`str`, *optional*): Allow to reattach to an existing run which can be usefull when resuming training from a checkpoint. When `MLFLOW_RUN_ID` environment variable is set, `start_run` attempts to resume a run with the specified run ID and other parameters are ignored. - **MLFLOW_FLATTEN_PARAMS** (`str`, *optional*, defaults to `False`): Whether to flatten the parameters dictionary before logging. """ self._log_artifacts = os.getenv("HF_MLFLOW_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES self._nested_run = os.getenv("MLFLOW_NESTED_RUN", "FALSE").upper() in ENV_VARS_TRUE_VALUES self._tracking_uri = os.getenv("MLFLOW_TRACKING_URI", None) self._experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", None) self._flatten_params = os.getenv("MLFLOW_FLATTEN_PARAMS", "FALSE").upper() in ENV_VARS_TRUE_VALUES self._run_id = os.getenv("MLFLOW_RUN_ID", None) # "synchronous" flag is only available with mlflow version >= 2.8.0 # https://github.com/mlflow/mlflow/pull/9705 # https://github.com/mlflow/mlflow/releases/tag/v2.8.0 self._async_log = packaging.version.parse(self._ml_flow.__version__) >= packaging.version.parse("2.8.0") logger.debug( f"MLflow experiment_name={self._experiment_name}, run_name={args.run_name}, nested={self._nested_run}," f" tags={self._nested_run}, tracking_uri={self._tracking_uri}" ) if state.is_world_process_zero: if not self._ml_flow.is_tracking_uri_set(): if self._tracking_uri: self._ml_flow.set_tracking_uri(self._tracking_uri) logger.debug(f"MLflow tracking URI is set to {self._tracking_uri}") else: logger.debug( "Environment variable `MLFLOW_TRACKING_URI` is not provided and therefore will not be" " explicitly set." 
) else: logger.debug(f"MLflow tracking URI is set to {self._ml_flow.get_tracking_uri()}") if self._ml_flow.active_run() is None or self._nested_run or self._run_id: if self._experiment_name: # Use of set_experiment() ensure that Experiment is created if not exists self._ml_flow.set_experiment(self._experiment_name) self._ml_flow.start_run(run_name=args.run_name, nested=self._nested_run) logger.debug(f"MLflow run started with run_id={self._ml_flow.active_run().info.run_id}") self._auto_end_run = True combined_dict = args.to_dict() if hasattr(model, "config") and model.config is not None: model_config = model.config.to_dict() combined_dict = {**model_config, **combined_dict} combined_dict = flatten_dict(combined_dict) if self._flatten_params else combined_dict # remove params that are too long for MLflow for name, value in list(combined_dict.items()): # internally, all values are converted to str in MLflow if len(str(value)) > self._MAX_PARAM_VAL_LENGTH: logger.warning( f'Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' " log_param() only accepts values no longer than 250 characters so we dropped this attribute." " You can use `MLFLOW_FLATTEN_PARAMS` environment variable to flatten the parameters and" " avoid this message." ) del combined_dict[name] # MLflow cannot log more than 100 values in one go, so we have to split it combined_dict_items = list(combined_dict.items()) for i in range(0, len(combined_dict_items), self._MAX_PARAMS_TAGS_PER_BATCH): if self._async_log: self._ml_flow.log_params( dict(combined_dict_items[i : i + self._MAX_PARAMS_TAGS_PER_BATCH]), synchronous=False ) else: self._ml_flow.log_params(dict(combined_dict_items[i : i + self._MAX_PARAMS_TAGS_PER_BATCH])) mlflow_tags = os.getenv("MLFLOW_TAGS", None) if mlflow_tags: mlflow_tags = json.loads(mlflow_tags) self._ml_flow.set_tags(mlflow_tags) self._initialized = True def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, logs, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: metrics = {} for k, v in logs.items(): if isinstance(v, (int, float)): metrics[k] = v elif isinstance(v, torch.Tensor) and v.numel() == 1: metrics[k] = v.item() else: logger.warning( f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' "MLflow's log_metric() only accepts float and int types so we dropped this attribute." ) if self._async_log: self._ml_flow.log_metrics(metrics=metrics, step=state.global_step, synchronous=False) else: self._ml_flow.log_metrics(metrics=metrics, step=state.global_step) def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: if self._auto_end_run and self._ml_flow.active_run(): self._ml_flow.end_run() def on_save(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero and self._log_artifacts: ckpt_dir = f"checkpoint-{state.global_step}" artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f"Logging checkpoint artifacts in {ckpt_dir}. 
This may take time.") self._ml_flow.pyfunc.log_model( ckpt_dir, artifacts={"model_path": artifact_path}, python_model=self._ml_flow.pyfunc.PythonModel(), ) def __del__(self): # if the previous run is not terminated correctly, the fluent API will # not let you start a new run before the previous one is killed if ( self._auto_end_run and callable(getattr(self._ml_flow, "active_run", None)) and self._ml_flow.active_run() is not None ): self._ml_flow.end_run() class DagsHubCallback(MLflowCallback): """ A [`TrainerCallback`] that logs to [DagsHub](https://dagshub.com/). Extends [`MLflowCallback`] """ def __init__(self): super().__init__() if not is_dagshub_available(): raise ImportError("DagsHubCallback requires dagshub to be installed. Run `pip install dagshub`.") from dagshub.upload import Repo self.Repo = Repo def setup(self, *args, **kwargs): """ Setup the DagsHub's Logging integration. Environment: - **HF_DAGSHUB_LOG_ARTIFACTS** (`str`, *optional*): Whether to save the data and model artifacts for the experiment. Default to `False`. """ self.log_artifacts = os.getenv("HF_DAGSHUB_LOG_ARTIFACTS", "FALSE").upper() in ENV_VARS_TRUE_VALUES self.name = os.getenv("HF_DAGSHUB_MODEL_NAME") or "main" self.remote = os.getenv("MLFLOW_TRACKING_URI") self.repo = self.Repo( owner=self.remote.split(os.sep)[-2], name=self.remote.split(os.sep)[-1].split(".")[0], branch=os.getenv("BRANCH") or "main", ) self.path = Path("artifacts") if self.remote is None: raise RuntimeError( "DagsHubCallback requires the `MLFLOW_TRACKING_URI` environment variable to be set. Did you run" " `dagshub.init()`?" ) super().setup(*args, **kwargs) def on_train_end(self, args, state, control, **kwargs): if self.log_artifacts: if getattr(self, "train_dataloader", None): torch.save(self.train_dataloader.dataset, os.path.join(args.output_dir, "dataset.pt")) self.repo.directory(str(self.path)).add_dir(args.output_dir) class NeptuneMissingConfiguration(Exception): def __init__(self): super().__init__( """ ------ Unsupported ---- We were not able to create new runs. You provided a custom Neptune run to `NeptuneCallback` with the `run` argument. For the integration to work fully, provide your `api_token` and `project` by saving them as environment variables or passing them to the callback. """ ) class NeptuneCallback(TrainerCallback): """TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai). Args: api_token (`str`, *optional*): Neptune API token obtained upon registration. You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment variable (strongly recommended). See full setup instructions in the [docs](https://docs.neptune.ai/setup/installation). project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name". You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the value of the `NEPTUNE_PROJECT` environment variable is used. name (`str`, *optional*): Custom name for the run. base_namespace (`str`, optional, defaults to "finetuning"): In the Neptune run, the root namespace that will contain all of the metadata logged by the callback. log_parameters (`bool`, *optional*, defaults to `True`): If True, logs all Trainer arguments and model parameters provided by the Trainer. log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer. If "last", uploads only the most recently saved checkpoint. 
If "best", uploads the best checkpoint (among the ones saved by the Trainer). If `None`, does not upload checkpoints. run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run. Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object). **neptune_run_kwargs (*optional*): Additional keyword arguments to be passed directly to the [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created. For instructions and examples, see the [Transformers integration guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation. """ integration_version_key = "source_code/integrations/transformers" model_parameters_key = "model_parameters" trial_name_key = "trial" trial_params_key = "trial_params" trainer_parameters_key = "trainer_parameters" flat_metrics = {"train/epoch"} def __init__( self, *, api_token: Optional[str] = None, project: Optional[str] = None, name: Optional[str] = None, base_namespace: str = "finetuning", run=None, log_parameters: bool = True, log_checkpoints: Optional[str] = None, **neptune_run_kwargs, ): if not is_neptune_available(): raise ValueError( "NeptuneCallback requires the Neptune client library to be installed. " "To install the library, run `pip install neptune`." ) try: from neptune import Run from neptune.internal.utils import verify_type except ImportError: from neptune.new.internal.utils import verify_type from neptune.new.metadata_containers.run import Run verify_type("api_token", api_token, (str, type(None))) verify_type("project", project, (str, type(None))) verify_type("name", name, (str, type(None))) verify_type("base_namespace", base_namespace, str) verify_type("run", run, (Run, type(None))) verify_type("log_parameters", log_parameters, bool) verify_type("log_checkpoints", log_checkpoints, (str, type(None))) self._base_namespace_path = base_namespace self._log_parameters = log_parameters self._log_checkpoints = log_checkpoints self._initial_run: Optional[Run] = run self._run = None self._is_monitoring_run = False self._run_id = None self._force_reset_monitoring_run = False self._init_run_kwargs = {"api_token": api_token, "project": project, "name": name, **neptune_run_kwargs} self._volatile_checkpoints_dir = None self._should_upload_checkpoint = self._log_checkpoints is not None self._recent_checkpoint_path = None if self._log_checkpoints in {"last", "best"}: self._target_checkpoints_namespace = f"checkpoints/{self._log_checkpoints}" self._should_clean_recently_uploaded_checkpoint = True else: self._target_checkpoints_namespace = "checkpoints" self._should_clean_recently_uploaded_checkpoint = False def _stop_run_if_exists(self): if self._run: self._run.stop() del self._run self._run = None def _initialize_run(self, **additional_neptune_kwargs): try: from neptune import init_run from neptune.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException except ImportError: from neptune.new import init_run from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException self._stop_run_if_exists() try: run_params = additional_neptune_kwargs.copy() run_params.update(self._init_run_kwargs) self._run = init_run(**run_params) self._run_id = self._run["sys/id"].fetch() except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e: raise NeptuneMissingConfiguration() from e def _use_initial_run(self): self._run = self._initial_run 
self._is_monitoring_run = True self._run_id = self._run["sys/id"].fetch() self._initial_run = None def _ensure_run_with_monitoring(self): if self._initial_run is not None: self._use_initial_run() else: if not self._force_reset_monitoring_run and self._is_monitoring_run: return if self._run and not self._is_monitoring_run and not self._force_reset_monitoring_run: self._initialize_run(with_id=self._run_id) self._is_monitoring_run = True else: self._initialize_run() self._force_reset_monitoring_run = False def _ensure_at_least_run_without_monitoring(self): if self._initial_run is not None: self._use_initial_run() else: if not self._run: self._initialize_run( with_id=self._run_id, capture_stdout=False, capture_stderr=False, capture_hardware_metrics=False, capture_traceback=False, ) self._is_monitoring_run = False @property def run(self): if self._run is None: self._ensure_at_least_run_without_monitoring() return self._run @property def _metadata_namespace(self): return self.run[self._base_namespace_path] def _log_integration_version(self): self.run[NeptuneCallback.integration_version_key] = version def _log_trainer_parameters(self, args): self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict() def _log_model_parameters(self, model): from neptune.utils import stringify_unsupported if model and hasattr(model, "config") and model.config is not None: self._metadata_namespace[NeptuneCallback.model_parameters_key] = stringify_unsupported( model.config.to_dict() ) def _log_hyper_param_search_parameters(self, state): if state and hasattr(state, "trial_name"): self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name if state and hasattr(state, "trial_params") and state.trial_params is not None: self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params def _log_model_checkpoint(self, source_directory: str, checkpoint: str): target_path = relative_path = os.path.join(source_directory, checkpoint) if self._volatile_checkpoints_dir is not None: consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint) try: # Remove leading ../ from a relative path. cpkt_path = relative_path.replace("..", "").lstrip(os.path.sep) copy_path = os.path.join(consistent_checkpoint_path, cpkt_path) shutil.copytree(relative_path, copy_path) target_path = consistent_checkpoint_path except IOError as e: logger.warning( "NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'. 
" "Could fail trying to upload.".format(e) ) self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path) if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None: self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path) self._recent_checkpoint_path = relative_path def on_init_end(self, args, state, control, **kwargs): self._volatile_checkpoints_dir = None if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name if self._log_checkpoints == "best" and not args.load_best_model_at_end: raise ValueError("To save the best model checkpoint, the load_best_model_at_end argument must be enabled.") def on_train_begin(self, args, state, control, model=None, **kwargs): if not state.is_world_process_zero: return self._ensure_run_with_monitoring() self._force_reset_monitoring_run = True self._log_integration_version() if self._log_parameters: self._log_trainer_parameters(args) self._log_model_parameters(model) if state.is_hyper_param_search: self._log_hyper_param_search_parameters(state) def on_train_end(self, args, state, control, **kwargs): self._stop_run_if_exists() def __del__(self): if self._volatile_checkpoints_dir is not None: shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True) self._stop_run_if_exists() def on_save(self, args, state, control, **kwargs): if self._should_upload_checkpoint: self._log_model_checkpoint(args.output_dir, f"checkpoint-{state.global_step}") def on_evaluate(self, args, state, control, metrics=None, **kwargs): if self._log_checkpoints == "best": best_metric_name = args.metric_for_best_model if not best_metric_name.startswith("eval_"): best_metric_name = f"eval_{best_metric_name}" metric_value = metrics.get(best_metric_name) operator = np.greater if args.greater_is_better else np.less self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric) @classmethod def get_run(cls, trainer): for callback in trainer.callback_handler.callbacks: if isinstance(callback, cls): return callback.run raise Exception("The trainer doesn't have a NeptuneCallback configured.") def on_log(self, args, state, control, logs: Optional[Dict[str, float]] = None, **kwargs): if not state.is_world_process_zero: return if logs is not None: for name, value in rewrite_logs(logs).items(): if isinstance(value, (int, float)): if name in NeptuneCallback.flat_metrics: self._metadata_namespace[name] = value else: self._metadata_namespace[name].log(value, step=state.global_step) class CodeCarbonCallback(TrainerCallback): """ A [`TrainerCallback`] that tracks the CO2 emission of training. """ def __init__(self): if not is_codecarbon_available(): raise RuntimeError( "CodeCarbonCallback requires `codecarbon` to be installed. Run `pip install codecarbon`." 
) import codecarbon self._codecarbon = codecarbon self.tracker = None def on_init_end(self, args, state, control, **kwargs): if self.tracker is None and state.is_local_process_zero: # CodeCarbon will automatically handle environment variables for configuration self.tracker = self._codecarbon.EmissionsTracker(output_dir=args.output_dir) def on_train_begin(self, args, state, control, model=None, **kwargs): if self.tracker and state.is_local_process_zero: self.tracker.start() def on_train_end(self, args, state, control, **kwargs): if self.tracker and state.is_local_process_zero: self.tracker.stop() class ClearMLCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/). Environment: - **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`): ClearML project name. - **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`): ClearML task name. - **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`): Whether to log models as artifacts during training. """ log_suffix = "" _hparams_section = "Transformers" _model_config_section = "Model Configuration" _ignore_hparams_overrides = "_ignore_hparams_ui_overrides_" _ignoge_model_config_overrides = "_ignore_model_config_ui_overrides_" _model_config_description = "The configuration of model number {}." _model_config_description_note = ( "Note that, when cloning this task and running it remotely," " the configuration might be applied to another model instead of this one." " To avoid this, initialize the task externally by calling `Task.init`" " before the `ClearMLCallback` is instantiated." ) _train_run_counter = 0 _model_connect_counter = 0 _task_created_in_callback = False _should_close_on_train_end = None def __init__(self): if is_clearml_available(): import clearml self._clearml = clearml else: raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. 
Run `pip install clearml`.") self._initialized = False self._clearml_task = None self._log_model = False self._checkpoints_saved = [] def setup(self, args, state, model, tokenizer, **kwargs): if self._clearml is None: return if self._initialized: return ClearMLCallback._train_run_counter += 1 ClearMLCallback._model_connect_counter += 1 ClearMLCallback.log_suffix = ( "" if ClearMLCallback._train_run_counter == 1 else "_" + str(ClearMLCallback._train_run_counter) ) if state.is_world_process_zero: logger.info("Automatic ClearML logging enabled.") if self._clearml_task is None: if ClearMLCallback._should_close_on_train_end is None: if not self._clearml.Task.running_locally() or self._clearml.Task.current_task(): ClearMLCallback._should_close_on_train_end = False else: ClearMLCallback._should_close_on_train_end = True # This might happen when running inside of a pipeline, where the task is already initialized # from outside of Hugging Face if self._clearml.Task.running_locally() and self._clearml.Task.current_task(): self._clearml_task = self._clearml.Task.current_task() self._log_model = os.getenv( "CLEARML_LOG_MODEL", "FALSE" if not ClearMLCallback._task_created_in_callback else "TRUE", ).upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"}) logger.info("External ClearML Task has been connected.") else: self._clearml_task = self._clearml.Task.init( project_name=os.getenv("CLEARML_PROJECT", "HuggingFace Transformers"), task_name=os.getenv("CLEARML_TASK", "Trainer"), auto_connect_frameworks={"tensorboard": False, "pytorch": False}, output_uri=True, ) self._log_model = os.getenv("CLEARML_LOG_MODEL", "TRUE").upper() in ENV_VARS_TRUE_VALUES.union( {"TRUE"} ) ClearMLCallback._task_created_in_callback = True logger.info("ClearML Task has been initialized.") self._initialized = True suffixed_hparams_section = ClearMLCallback._hparams_section + ClearMLCallback.log_suffix ignore_hparams_config_section = suffixed_hparams_section + "/" + ClearMLCallback._ignore_hparams_overrides if self._clearml.Task.running_locally(): self._copy_training_args_as_hparams(args, suffixed_hparams_section) self._clearml_task.set_parameter( name=ignore_hparams_config_section, value=True, value_type=bool, description=( "If True, ignore Transformers hyperparameters overrides done in the UI/backend " + "when running remotely. Otherwise, the overrides will be applied when running remotely" ), ) elif not self._clearml_task.get_parameter(ignore_hparams_config_section, default=True, cast=True): self._clearml_task.connect(args, suffixed_hparams_section) else: self._copy_training_args_as_hparams( args, ClearMLCallback._hparams_section + ClearMLCallback.log_suffix ) if getattr(model, "config", None) is not None: ignore_model_config_section = ( suffixed_hparams_section + "/" + ClearMLCallback._ignoge_model_config_overrides ) configuration_object_description = ClearMLCallback._model_config_description.format( ClearMLCallback._model_connect_counter ) if ClearMLCallback._model_connect_counter != ClearMLCallback._train_run_counter: configuration_object_description += " " + ClearMLCallback._model_config_description_note if self._clearml.Task.running_locally(): self._clearml_task.set_parameter( name=ignore_model_config_section, value=True, value_type=bool, description=( "If True, ignore Transformers model configuration overrides done in the UI/backend " + "when running remotely. 
Otherwise, the overrides will be applied when running remotely" ), ) self._clearml_task.set_configuration_object( name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, config_dict=model.config.to_dict(), description=configuration_object_description, ) elif not self._clearml_task.get_parameter(ignore_model_config_section, default=True, cast=True): model.config = model.config.from_dict( self._clearml_task.get_configuration_object_as_dict( ClearMLCallback._model_config_section + ClearMLCallback.log_suffix ) ) else: self._clearml_task.set_configuration_object( name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix, config_dict=model.config.to_dict(), description=configuration_object_description, ) def on_train_begin(self, args, state, control, model=None, tokenizer=None, **kwargs): if self._clearml is None: return self._checkpoints_saved = [] if state.is_hyper_param_search: self._initialized = False if not self._initialized: self.setup(args, state, model, tokenizer, **kwargs) def on_train_end(self, args, state, control, **kwargs): if ClearMLCallback._should_close_on_train_end: self._clearml_task.close() ClearMLCallback._train_run_counter = 0 def on_log(self, args, state, control, model=None, tokenizer=None, logs=None, **kwargs): if self._clearml is None: return if not self._initialized: self.setup(args, state, model, tokenizer, **kwargs) if state.is_world_process_zero: eval_prefix = "eval_" eval_prefix_len = len(eval_prefix) test_prefix = "test_" test_prefix_len = len(test_prefix) single_value_scalars = [ "train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss", "total_flos", "epoch", ] for k, v in logs.items(): if isinstance(v, (int, float)): if k in single_value_scalars: self._clearml_task.get_logger().report_single_value( name=k + ClearMLCallback.log_suffix, value=v ) elif k.startswith(eval_prefix): self._clearml_task.get_logger().report_scalar( title="eval" + ClearMLCallback.log_suffix, series=k[eval_prefix_len:], value=v, iteration=state.global_step, ) elif k.startswith(test_prefix): self._clearml_task.get_logger().report_scalar( title="test" + ClearMLCallback.log_suffix, series=k[test_prefix_len:], value=v, iteration=state.global_step, ) else: self._clearml_task.get_logger().report_scalar( title="train" + ClearMLCallback.log_suffix, series=k, value=v, iteration=state.global_step, ) else: logger.warning( "Trainer is attempting to log a value of " f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' "This invocation of ClearML logger's report_scalar() " "is incorrect so we dropped this attribute." ) def on_save(self, args, state, control, **kwargs): if self._log_model and self._clearml_task and state.is_world_process_zero: ckpt_dir = f"checkpoint-{state.global_step}" artifact_path = os.path.join(args.output_dir, ckpt_dir) name = ckpt_dir + ClearMLCallback.log_suffix logger.info(f"Logging checkpoint artifact `{name}`. 
This may take some time.") output_model = self._clearml.OutputModel(task=self._clearml_task, name=name) output_model.connect(task=self._clearml_task, name=name) output_model.update_weights_package( weights_path=artifact_path, target_filename=ckpt_dir, iteration=state.global_step, auto_delete_file=False, ) self._checkpoints_saved.append(output_model) while args.save_total_limit and args.save_total_limit < len(self._checkpoints_saved): try: self._clearml.model.Model.remove( self._checkpoints_saved[0], delete_weights_file=True, force=True, raise_on_errors=True, ) except Exception as e: logger.warning( "Could not remove checkpoint `{}` after going over the `save_total_limit`. Error is: {}".format( self._checkpoints_saved[0].name, e ) ) break self._checkpoints_saved = self._checkpoints_saved[1:] def _copy_training_args_as_hparams(self, training_args, prefix): as_dict = { field.name: getattr(training_args, field.name) for field in fields(training_args) if field.init and not field.name.endswith("_token") } flat_dict = {str(k): v for k, v in self._clearml.utilities.proxy_object.flatten_dictionary(as_dict).items()} self._clearml_task._arguments.copy_from_dict(flat_dict, prefix=prefix) class FlyteCallback(TrainerCallback): """A [`TrainerCallback`] that sends the logs to [Flyte](https://flyte.org/). NOTE: This callback only works within a Flyte task. Args: save_log_history (`bool`, *optional*, defaults to `True`): When set to True, the training logs are saved as a Flyte Deck. sync_checkpoints (`bool`, *optional*, defaults to `True`): When set to True, checkpoints are synced with Flyte and can be used to resume training in the case of an interruption. Example: ```python # Note: This example skips over some setup steps for brevity. from flytekit import current_context, task @task def train_hf_transformer(): cp = current_context().checkpoint trainer = Trainer(..., callbacks=[FlyteCallback()]) output = trainer.train(resume_from_checkpoint=cp.restore()) ``` """ def __init__(self, save_log_history: bool = True, sync_checkpoints: bool = True): super().__init__() if not is_flytekit_available(): raise ImportError("FlyteCallback requires flytekit to be installed. Run `pip install flytekit`.") if not is_flyte_deck_standard_available() or not is_pandas_available(): logger.warning( "Syncing log history requires both flytekitplugins-deck-standard and pandas to be installed. " "Run `pip install flytekitplugins-deck-standard pandas` to enable this feature." ) save_log_history = False from flytekit import current_context self.cp = current_context().checkpoint self.save_log_history = save_log_history self.sync_checkpoints = sync_checkpoints def on_save(self, args, state, control, **kwargs): if self.sync_checkpoints and state.is_world_process_zero: ckpt_dir = f"checkpoint-{state.global_step}" artifact_path = os.path.join(args.output_dir, ckpt_dir) logger.info(f"Syncing checkpoint in {ckpt_dir} to Flyte. This may take time.") self.cp.save(artifact_path) def on_train_end(self, args, state, control, **kwargs): if self.save_log_history: import pandas as pd from flytekit import Deck from flytekitplugins.deck.renderer import TableRenderer log_history_df = pd.DataFrame(state.log_history) Deck("Log History", TableRenderer().to_html(log_history_df)) class DVCLiveCallback(TrainerCallback): """ A [`TrainerCallback`] that sends the logs to [DVCLive](https://www.dvc.org/doc/dvclive). Use the environment variables below in `setup` to configure the integration. 
To customize this callback beyond those environment variables, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Args: live (`dvclive.Live`, *optional*, defaults to `None`): Optional Live instance. If None, a new instance will be created using **kwargs. log_model (Union[Literal["all"], bool], *optional*, defaults to `None`): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True`, the final checkpoint is logged at the end of training. If set to `"all"`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ def __init__( self, live: Optional[Any] = None, log_model: Optional[Union[Literal["all"], bool]] = None, **kwargs, ): if not is_dvclive_available(): raise RuntimeError("DVCLiveCallback requires dvclive to be installed. Run `pip install dvclive`.") from dvclive import Live self._initialized = False self.live = None if isinstance(live, Live): self.live = live elif live is not None: raise RuntimeError(f"Found class {live.__class__} for live, expected dvclive.Live") self._log_model = log_model if self._log_model is None: log_model_env = os.getenv("HF_DVCLIVE_LOG_MODEL", "FALSE") if log_model_env.upper() in ENV_VARS_TRUE_VALUES: self._log_model = True elif log_model_env.lower() == "all": self._log_model = "all" def setup(self, args, state, model): """ Setup the optional DVCLive integration. To customize this callback beyond the environment variables below, see [here](https://dvc.org/doc/dvclive/ml-frameworks/huggingface). Environment: - **HF_DVCLIVE_LOG_MODEL** (`str`, *optional*): Whether to use `dvclive.Live.log_artifact()` to log checkpoints created by [`Trainer`]. If set to `True` or *1*, the final checkpoint is logged at the end of training. If set to `all`, the entire [`TrainingArguments`]'s `output_dir` is logged at each checkpoint. """ from dvclive import Live self._initialized = True if state.is_world_process_zero: if not self.live: self.live = Live() self.live.log_params(args.to_dict()) def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: from dvclive.plots import Metric from dvclive.utils import standardize_metric_name for key, value in logs.items(): if Metric.could_log(value): self.live.log_metric(standardize_metric_name(key, "dvclive.huggingface"), value) else: logger.warning( "Trainer is attempting to log a value of " f'"{value}" of type {type(value)} for key "{key}" as a scalar. ' "This invocation of DVCLive's Live.log_metric() " "is incorrect so we dropped this attribute." 
) self.live.next_step() def on_save(self, args, state, control, **kwargs): if self._log_model == "all" and self._initialized and state.is_world_process_zero: self.live.log_artifact(args.output_dir) def on_train_end(self, args, state, control, **kwargs): if self._initialized and state.is_world_process_zero: from transformers.trainer import Trainer if self._log_model is True: fake_trainer = Trainer(args=args, model=kwargs.get("model"), tokenizer=kwargs.get("tokenizer")) name = "best" if args.load_best_model_at_end else "last" output_dir = os.path.join(args.output_dir, name) fake_trainer.save_model(output_dir) self.live.log_artifact(output_dir, name=name, type="model", copy=True) self.live.end() INTEGRATION_TO_CALLBACK = { "azure_ml": AzureMLCallback, "comet_ml": CometCallback, "mlflow": MLflowCallback, "neptune": NeptuneCallback, "tensorboard": TensorBoardCallback, "wandb": WandbCallback, "codecarbon": CodeCarbonCallback, "clearml": ClearMLCallback, "dagshub": DagsHubCallback, "flyte": FlyteCallback, "dvclive": DVCLiveCallback, } def get_reporting_integration_callbacks(report_to): for integration in report_to: if integration not in INTEGRATION_TO_CALLBACK: raise ValueError( f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported." ) return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to]
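For context, the `INTEGRATION_TO_CALLBACK` mapping and `get_reporting_integration_callbacks` above are what `Trainer` uses to resolve `TrainingArguments.report_to` into callback classes. A minimal sketch of that lookup (the chosen integrations are illustrative; their packages only need to be installed when the callbacks are actually instantiated by the Trainer):

```python
from transformers.integrations import get_reporting_integration_callbacks

# Returns callback *classes*, not instances; Trainer instantiates each one and
# registers it on its CallbackHandler alongside the default callbacks.
callback_classes = get_reporting_integration_callbacks(["tensorboard", "dvclive"])
assert len(callback_classes) == 2  # TensorBoardCallback, DVCLiveCallback
```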
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/awq.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "AWQ (Activation aware Weight Quantization) integration file" from ..activations import ACT2FN from ..modeling_utils import PreTrainedModel from ..utils import is_auto_awq_available, is_torch_available from ..utils.quantization_config import ( AwqBackendPackingMethod, AwqConfig, AWQLinearVersion, ExllamaVersion, ) if is_torch_available(): import torch import torch.nn as nn AWQ_FUSED_MAPPINGS = { "mistral": { "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "mlp": ["gate_proj", "up_proj", "down_proj"], "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], "use_alibi": False, }, "mixtral": { "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "mlp": ["w1", "w3", "w2"], "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], "use_alibi": False, "rope_theta": 1000000.0, }, "llama": { "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "mlp": ["gate_proj", "up_proj", "down_proj"], "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], "use_alibi": False, }, "llava": { "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "mlp": ["gate_proj", "up_proj", "down_proj"], "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], "use_alibi": False, }, } def replace_with_awq_linear( model, modules_to_not_convert=None, quantization_config=None, current_key_name=None, has_been_replaced=False, ) -> bool: """ Public method that recursively replaces the Linear layers of the given model with AWQ quantized layers. `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. During the module replacement, we also infer the backend to use through the `quantization_config` object. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`AwqConfig`): The quantization config object that contains the quantization parameters. modules_to_not_convert (`list`, *optional*): A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be converted. current_key_name (`list`, *optional*): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user. """ if modules_to_not_convert is None: modules_to_not_convert = [] backend = quantization_config.backend if not is_auto_awq_available(): raise ValueError( "AWQ (either `autoawq` or `llmawq`) is not available. 
Please install it with `pip install autoawq` or check out the installation guide in https://github.com/mit-han-lab/llm-awq" ) if backend == AwqBackendPackingMethod.AUTOAWQ: if quantization_config.version == AWQLinearVersion.GEMM: from awq.modules.linear.gemm import WQLinear_GEMM target_cls = WQLinear_GEMM elif quantization_config.version == AWQLinearVersion.GEMV: from awq.modules.linear.gemv import WQLinear_GEMV target_cls = WQLinear_GEMV elif quantization_config.version == AWQLinearVersion.EXLLAMA: if quantization_config.exllama_config["version"] == ExllamaVersion.ONE: from awq.modules.linear.exllama import WQLinear_Exllama target_cls = WQLinear_Exllama elif quantization_config.exllama_config["version"] == ExllamaVersion.TWO: from awq.modules.linear.exllamav2 import WQLinear_ExllamaV2 target_cls = WQLinear_ExllamaV2 else: raise ValueError(f"Unrecognized Exllama version: {quantization_config.exllama_config['version']}") else: raise ValueError(f"Unrecognized AWQ version: {quantization_config.version}") else: from awq.quantize.qmodule import WQLinear target_cls = WQLinear for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in ".".join(current_key_name) for key in modules_to_not_convert): in_features = module.in_features out_features = module.out_features model._modules[name] = target_cls( w_bit=quantization_config.bits, group_size=quantization_config.group_size, in_features=in_features, out_features=out_features, bias=module.bias is not None, dev=module.weight.device, ) has_been_replaced = True # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = replace_with_awq_linear( module, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, quantization_config=quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def get_modules_to_fuse(model, quantization_config): """ Returns the fusing mapping given the quantization config and the model Args: model (`~PreTrainedModel`): The model to fuse - note this model should have been converted into AWQ format beforehand. quantization_config (`~transformers.quantization_config.AWQConfig`): The quantization configuration to use. """ if not isinstance(model, PreTrainedModel): raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}") # Always default to `quantization_config.modules_to_fuse` if quantization_config.modules_to_fuse is not None: current_fused_mapping = quantization_config.modules_to_fuse current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len elif model.config.model_type in AWQ_FUSED_MAPPINGS: current_fused_mapping = AWQ_FUSED_MAPPINGS[model.config.model_type] # Properly deal with the case where we have a multi-modal model as well (e.g. Llava) if not hasattr(model.config, "text_config"): config = model.config else: config = model.config.text_config # Handle hidden_size, num_attention_heads, num_key_value_heads on our own. 
hidden_size = config.hidden_size num_attention_heads = config.num_attention_heads num_key_value_heads = getattr(config, "num_key_value_heads", num_attention_heads) # Fill `current_fused_mapping` with the expected values current_fused_mapping["hidden_size"] = hidden_size current_fused_mapping["num_attention_heads"] = num_attention_heads current_fused_mapping["num_key_value_heads"] = num_key_value_heads current_fused_mapping["max_seq_len"] = quantization_config.fuse_max_seq_len else: raise ValueError( "Fusing mapping not found either on the quantization config or the supported `AWQ_FUSED_MAPPINGS`. Please pass a `fused_mapping` argument" " in the `quantization_config` or raise an issue on transformers https://github.com/huggingface/transformers to add its support." ) return current_fused_mapping def fuse_awq_modules(model, quantization_config): """ Optionally fuse some modules in the model to speedup inference. Args: model (`~PreTrainedModel`): The model to fuse - note this model should have been converted into AWQ format beforehand. quantization_config (`Union[AwqConfig, dict]`): The quantization configuration to use. """ # We need to convert it from dict in order to get an AwqConfig object # otherwise the fields `backend` etc. will not be available # https://github.com/huggingface/transformers/pull/27411#discussion_r1414044495 if isinstance(quantization_config, dict): quantization_config = AwqConfig.from_dict(quantization_config) backend = quantization_config.backend modules_to_fuse = get_modules_to_fuse(model, quantization_config) modules_to_not_convert = getattr(quantization_config, "modules_to_not_convert", None) if backend == AwqBackendPackingMethod.AUTOAWQ: from awq.modules.fused.attn import QuantAttentionFused from awq.modules.fused.mlp import QuantFusedMLP from awq.modules.fused.norm import FasterTransformerRMSNorm else: raise ValueError("Fusing is only supported for the AutoAWQ backend") fused_attention_modules = [] for name, module in model.named_modules(): if modules_to_not_convert is not None: if any(module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert): continue # Replace layer norms _fuse_awq_layernorm(modules_to_fuse["layernorm"], module, FasterTransformerRMSNorm) # Replace MLP layers _fuse_awq_mlp(model, name, modules_to_fuse["mlp"], module, QuantFusedMLP) # Replace attention layers attention_has_been_fused = _fuse_awq_attention_layers( model, module, modules_to_fuse, name, QuantAttentionFused ) if attention_has_been_fused: fused_attention_modules.append(name.split(".")[0]) # For AWQ fused + Llama we need to set `config._attn_implementation` = "custom" to avoid unexpected behavior and pass # `None` attention mask to the fused attention modules as now the attention mask is dropped by our models and dealt # by the `AttentionMaskConverter` module. 
if len(fused_attention_modules) > 0: for module_name, module in model.named_modules(): if any( module_name in fused_attention_modules for fused_attention_parent_module in fused_attention_modules ): if hasattr(module, "config") and hasattr(module.config, "_attn_implementation"): module.config._attn_implementation = "custom" return model def _fuse_awq_layernorm(fuse_module_names, module, target_cls): """ Fuse the LayerNorm layers into a target class using autoawq Args: fuse_module_names (`List[str]`): The list of module names to fuse module (`nn.Module`): The pytorch parent module that has layernorm modules to fuse target_cls (`~autoawq.FasterTransformerRMSNorm`): The `FasterTransformerRMSNorm` class as it only supports that class for now. """ for module_name in fuse_module_names: if hasattr(module, module_name): old_module = getattr(module, module_name) module._modules[module_name] = target_cls( old_module.weight, old_module.variance_epsilon, ).to(old_module.weight.device) del old_module def _fuse_awq_mlp(model, current_module_name, fuse_module_names, module, target_cls): """ Fuse the MLP layers into a target class using autoawq Args: model (`~PreTrainedModel`): The input pretrained model current_module_name (`str`): The current submodule name fuse_module_names (`List[str]`): The list of module names to fuse. For the MLP layers it has to be an array of length 3 that consists of the 3 MLP layers in the order (gate (dense layer post-attention) / up / down layers) module (`nn.Module`): The pytorch parent module that has layernorm modules to fuse target_cls (`~autoawq.QuantFusedMLP`): The `QuantFusedMLP` class as it only supports that class for now. """ if len(fuse_module_names) == 0: return if hasattr(module, fuse_module_names[0]): gate_proj = getattr(module, fuse_module_names[0]) up_proj = getattr(module, fuse_module_names[1]) down_proj = getattr(module, fuse_module_names[2]) previous_device = gate_proj.qweight.device # Deal also with the case model has `text_config` attribute hidden_act = ( model.config.hidden_act if not hasattr(model.config, "text_config") else model.config.text_config.hidden_act ) activation_fn = ACT2FN[hidden_act] new_module = target_cls(gate_proj, down_proj, up_proj, activation_fn) parent_name, child_name = current_module_name.rsplit(".", 1) parent = model.get_submodule(parent_name) setattr(parent, child_name, new_module.to(previous_device)) del gate_proj, up_proj, down_proj def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls): """ Fuse the Attention layers into a target class using autoawq Args: model (`~PreTrainedModel`): The input pretrained model module (`nn.Module`): The pytorch parent module that has layernorm modules to fuse modules_to_fuse (`List[str]`): The module fusing mapping. The dictionary has to contain a field `attention` with attention module names in the correct order: q, k, v, o layer current_module_name (`str`): The current submodule name target_cls (`~autoawq.QuantAttentionFused`): The `QuantAttentionFused` class as it only supports that class for now. 
""" from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV module_has_been_fused = False if len(modules_to_fuse["attention"]) == 0: return module_has_been_fused if hasattr(module, modules_to_fuse["attention"][0]): # First, we pack the QKV layers together q_proj = getattr(module, modules_to_fuse["attention"][0]) if isinstance(q_proj, WQLinear_GEMV): linear_target_cls = WQLinear_GEMV cat_dim = 0 elif isinstance(q_proj, WQLinear_GEMM): linear_target_cls = WQLinear_GEMM cat_dim = 1 else: raise ValueError("Unsupported q_proj type: {type(q_proj)}") previous_device = q_proj.qweight.device k_proj = getattr(module, modules_to_fuse["attention"][1]) v_proj = getattr(module, modules_to_fuse["attention"][2]) o_proj = getattr(module, modules_to_fuse["attention"][3]) bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None qkv_layer = linear_target_cls( q_proj.w_bit, q_proj.group_size, q_proj.in_features, q_proj.out_features + k_proj.out_features + v_proj.out_features, q_proj.bias is not None, next(iter(module.state_dict().values())).device, ) qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim) qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim) qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim) if isinstance(qkv_layer, WQLinear_GEMV): qkv_layer.split_k_iters = q_proj.split_k_iters qkv_layer.bias = bias fused_attention_layer = target_cls( modules_to_fuse["hidden_size"], modules_to_fuse["num_attention_heads"], modules_to_fuse["num_key_value_heads"], qkv_layer, o_proj, previous_device, modules_to_fuse["max_seq_len"], use_alibi=modules_to_fuse["use_alibi"], # The default value in autoawq is set to 10000.0 rope_theta=modules_to_fuse.get("rope_theta", 10000.0), ) fused_attention_layer.is_hf_transformers = True parent_name, child_name = current_module_name.rsplit(".", 1) parent = model.get_submodule(parent_name) setattr(parent, child_name, fused_attention_layer.to(previous_device)) del q_proj, k_proj, v_proj, o_proj module_has_been_fused = True return module_has_been_fused def post_init_awq_exllama_modules(model, exllama_config): """ Runs post init for Exllama layers which performs: - Weights unpacking, reordering and repacking - Devices scratch space allocation """ if exllama_config["version"] == ExllamaVersion.ONE: from awq.modules.linear.exllama import exllama_post_init model = exllama_post_init(model) elif exllama_config["version"] == ExllamaVersion.TWO: from awq.modules.linear.exllamav2 import exllamav2_post_init model = exllamav2_post_init( model, max_input_len=exllama_config["max_input_len"], max_batch_size=exllama_config["max_batch_size"], ) else: raise ValueError(f"Unrecognized Exllama version: {exllama_config['version']}") return model
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/bitsandbytes.py
import importlib.metadata import warnings from copy import deepcopy from inspect import signature from packaging import version from ..utils import is_accelerate_available, is_bitsandbytes_available, logging if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import Conv1D if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters logger = logging.get_logger(__name__) def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, quantized_stats=None): """ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the class `Int8Params` from `bitsandbytes`. Args: module (`torch.nn.Module`): The module in which the tensor we want to move lives. tensor_name (`str`): The full name of the parameter/buffer. device (`int`, `str` or `torch.device`): The device on which to set the tensor. value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any other device). quantized_stats (`dict[str, Any]`, *optional*): Dict with items for either 4-bit or 8-bit serialization """ # Recurse if needed if "." in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") is_buffer = tensor_name in module._buffers old_value = getattr(module, tensor_name) if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") prequantized_loading = quantized_stats is not None if is_buffer or not is_bitsandbytes_available(): is_8bit = False is_4bit = False else: is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit) is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params) if is_8bit or is_4bit: param = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: new_value = old_value.to(device) elif isinstance(value, torch.Tensor): new_value = value.to("cpu") else: new_value = torch.tensor(value, device="cpu") # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls, Conv1D) and not prequantized_loading: new_value = new_value.T kwargs = old_value.__dict__ if prequantized_loading != (new_value.dtype in (torch.int8, torch.uint8)): raise ValueError( f"Value dtype `{new_value.dtype}` is not compatible with parameter quantization status." 
) if is_8bit: is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse( "0.37.2" ) if new_value.dtype in (torch.int8, torch.uint8) and not is_8bit_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device) if prequantized_loading: setattr(new_value, "SCB", quantized_stats["SCB"].to(device)) elif is_4bit: if prequantized_loading: is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.41.3" ) if new_value.dtype in (torch.int8, torch.uint8) and not is_4bit_serializable: raise ValueError( "Detected 4-bit weights but the version of bitsandbytes is not compatible with 4-bit serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) new_value = bnb.nn.Params4bit.from_prequantized( data=new_value, quantized_stats=quantized_stats, requires_grad=False, device=device, **kwargs, ) else: new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device) module._parameters[tensor_name] = new_value else: if value is None: new_value = old_value.to(device) elif isinstance(value, torch.Tensor): new_value = value.to(device) else: new_value = torch.tensor(value, device=device) if is_buffer: module._buffers[tensor_name] = new_value else: new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value def _replace_with_bnb_linear( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(module, Conv1D): in_features, out_features = module.weight.shape else: in_features = module.in_features out_features = module.out_features if quantization_config.quantization_method() == "llm_int8": model._modules[name] = bnb.nn.Linear8bitLt( in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, ) has_been_replaced = True else: if ( quantization_config.llm_int8_skip_modules is not None and name in quantization_config.llm_int8_skip_modules ): pass else: extra_kwargs = ( {"quant_storage": quantization_config.bnb_4bit_quant_storage} if "quant_storage" in list(signature(bnb.nn.Linear4bit).parameters) else {} ) model._modules[name] = bnb.nn.Linear4bit( in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, **extra_kwargs, ) has_been_replaced = True # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = _replace_with_bnb_linear( module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None): """ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules from the `bitsandbytes` library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ bitsandbytes` The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models (>=176B parameters). Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`): Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[`str`]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert (for instances modules that are offloaded to `cpu` or `disk`). 
""" modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert model, has_been_replaced = _replace_with_bnb_linear( model, modules_to_not_convert, current_key_name, quantization_config ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model # For backward compatibility def replace_8bit_linear(*args, **kwargs): warnings.warn( "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, ) return replace_with_bnb_linear(*args, **kwargs) # For backward compatiblity def set_module_8bit_tensor_to_device(*args, **kwargs): warnings.warn( "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, ) return set_module_quantized_tensor_to_device(*args, **kwargs) def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model and tie the weights, then # check if it contains tied weights tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # If there is not tied weights, we want to keep the lm_head(output_embedding) in full precision if not has_tied_params: output_emb = model.get_output_embeddings() if output_emb is not None: list_last_module = [name for name, module in model.named_modules() if id(module) == id(output_emb)] return list_last_module # otherwise, no tied weights, no output embedding defined, simply keep the last module in full precision list_modules = list(model.named_parameters()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/eetq.py
# coding=utf-8 # Copyright 2024 NetEase, Inc. and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils import is_accelerate_available, is_eetq_available, logging if is_eetq_available(): import eetq import torch.nn as nn if is_accelerate_available(): from accelerate import init_empty_weights logger = logging.get_logger(__name__) def _replace_with_eetq_linear( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, pre_quantized=False, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ if current_key_name is None: current_key_name = [] for name, module in model.named_children(): current_key_name.append(name) if (isinstance(module, nn.Linear)) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): with init_empty_weights(): in_features = module.in_features out_features = module.out_features model._modules[name] = eetq.EetqLinear( in_features, out_features, module.bias is not None, module.weight.device ) if pre_quantized: model._modules[name].register_scale(module.weight.device) has_been_replaced = True # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = _replace_with_eetq_linear( module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, pre_quantized=pre_quantized, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def replace_with_eetq_linear( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, pre_quantized=False ): """ A helper function to replace all `torch.nn.Linear` modules by `eetq.EetqLinear` modules from the `eetq` library. This will enable running your models using high performance int8 weight-only gemm kerner from FasterTransformer and TensorRT-LLM. Make sure `eetq` compiled with the correct CUDA version of your hardware is installed before running this function. EETQ shall be installed via the source 'https://github.com/NetEase-FuXi/EETQ' The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no CPU/GPU memory is required to run this function. Each weight will be quantized along the channel. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`): Names of the modules to not convert in `EetqLinear`. 
In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[`str`]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert (for instance, modules that are offloaded to `cpu` or `disk`). """ modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert if quantization_config.modules_to_not_convert is not None: modules_to_not_convert.extend(quantization_config.modules_to_not_convert) modules_to_not_convert = list(set(modules_to_not_convert)) model, has_been_replaced = _replace_with_eetq_linear( model, modules_to_not_convert, current_key_name, quantization_config, pre_quantized=pre_quantized ) if not has_been_replaced: logger.warning( "You are loading your model using eetq but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model
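A usage sketch for the helper above: `EetqConfig` is the entry point during `from_pretrained`. The checkpoint name is illustrative, and `eetq` must be installed from source as noted in the docstring:

```python
from transformers import AutoModelForCausalLM, EetqConfig

quantization_config = EetqConfig("int8")  # per-channel int8 weight-only quantization

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # illustrative checkpoint
    quantization_config=quantization_config,
    device_map="auto",
)
```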
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/tpu.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch.utils.data import DataLoader from ..utils import is_torch_xla_available def tpu_spmd_dataloader(dataloader: DataLoader): if is_torch_xla_available(): import torch_xla.distributed.parallel_loader as pl assert isinstance( dataloader, pl.MpDeviceLoader ), "The dataloader must be a `torch_xla.distributed.parallel_loader.MpDeviceLoader`." # This is to support PyTorch/XLA FSDP via SPMD. # Here we shard the input data's 0th dim across the fsdp axis. import torch_xla.distributed.spmd as xs sharding_spec = xs.ShardingSpec(xs.get_global_mesh(), ("fsdp", None)) dataloader._parallel_loader_kwargs["input_sharding"] = sharding_spec return dataloader else: return dataloader
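A hedged sketch of how this helper is applied; it assumes a TPU environment with torch_xla SPMD enabled and a global mesh containing an `fsdp` axis already registered (e.g. via `xs.set_global_mesh`), which is what `xs.get_global_mesh()` above relies on:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl

from transformers.integrations.tpu import tpu_spmd_dataloader

dataset = TensorDataset(torch.randn(64, 16))  # illustrative data
loader = pl.MpDeviceLoader(DataLoader(dataset, batch_size=8), xm.xla_device())

# Attaches an input ShardingSpec so each batch's 0th dimension is sharded
# across the "fsdp" axis of the global mesh.
loader = tpu_spmd_dataloader(loader)
```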
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/integrations/aqlm.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "AQLM (Additive Quantization of Language Model) integration file" from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available if is_torch_available(): import torch.nn as nn def replace_with_aqlm_linear( model, quantization_config=None, linear_weights_not_to_quantize=None, current_key_name=None, has_been_replaced=False, ): """ Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers. `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`AqlmConfig`): The quantization config object that contains the quantization parameters. linear_weights_not_to_quantize (`list[str]`, *optional*): A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be converted. current_key_name (`list`, *optional*): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user. """ if not is_aqlm_available(): raise ValueError("AQLM is not available. 
Please install it with `pip install aqlm[cpu,gpu]`") if not is_accelerate_available(): raise ValueError("AQLM requires Accelerate to be installed: `pip install accelerate`") if linear_weights_not_to_quantize is None: linear_weights_not_to_quantize = [] from accelerate import init_empty_weights from aqlm import QuantizedLinear for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear): # Check if the current key is not in the `linear_weights_not_to_quantize` if ".".join(current_key_name) + ".weight" not in linear_weights_not_to_quantize: with init_empty_weights(): in_features = module.in_features out_features = module.out_features model._modules[name] = QuantizedLinear( in_features, out_features, bias=module.bias is not None, in_group_size=quantization_config.in_group_size, out_group_size=quantization_config.out_group_size, num_codebooks=quantization_config.num_codebooks, nbits_per_codebook=quantization_config.nbits_per_codebook, ) has_been_replaced = True # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = replace_with_aqlm_linear( module, quantization_config=quantization_config, linear_weights_not_to_quantize=linear_weights_not_to_quantize, current_key_name=current_key_name, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced
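For context, this replacement normally runs automatically when a prequantized AQLM checkpoint is loaded, but it can also be sketched directly. The base model below and the default `AqlmConfig` values are illustrative; `aqlm` and `accelerate` must be installed, and the swapped layers are created with empty weights that are meant to be filled from an AQLM state dict:

```python
from transformers import AutoModelForCausalLM, AqlmConfig
from transformers.integrations.aqlm import replace_with_aqlm_linear

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # illustrative model

model, replaced = replace_with_aqlm_linear(
    model,
    quantization_config=AqlmConfig(),  # default codebook settings, for illustration
    linear_weights_not_to_quantize=["lm_head.weight"],
)
print(replaced)  # True if at least one nn.Linear was swapped for aqlm.QuantizedLinear
```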
0
mavonic_private_repos/transformers/src/transformers
mavonic_private_repos/transformers/src/transformers/models/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom, bridgetower, bros, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, clvp, code_llama, codegen, cohere, conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec, dbrx, deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, depth_anything, deta, detr, dialogpt, dinat, dinov2, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, fastspeech2_conformer, flaubert, flava, fnet, focalnet, fsmt, funnel, fuyu, gemma, git, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer, grounding_dino, groupvit, herbert, hubert, ibert, idefics, idefics2, imagegpt, informer, instructblip, jamba, jukebox, kosmos2, layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, llava, llava_next, longformer, longt5, luke, lxmert, m2m_100, mamba, marian, markuplm, mask2former, maskformer, mbart, mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mistral, mixtral, mluke, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mpt, mra, mt5, musicgen, musicgen_melody, mvp, nat, nezha, nllb, nllb_moe, nougat, nystromformer, olmo, oneformer, openai, opt, owlv2, owlvit, patchtsmixer, patchtst, pegasus, pegasus_x, perceiver, persimmon, phi, phi3, phobert, pix2struct, plbart, poolformer, pop2piano, prophetnet, pvt, pvt_v2, qdqbert, qwen2, qwen2_moe, rag, realm, recurrent_gemma, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, seamless_m4t, seamless_m4t_v2, segformer, seggpt, sew, sew_d, siglip, speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter, squeezebert, stablelm, starcoder2, superpoint, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, trocr, tvlt, tvp, udop, umt5, unispeech, unispeech_sat, univnet, upernet, videomae, vilt, vipllava, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vitdet, vitmatte, vits, vivit, wav2vec2, wav2vec2_bert, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/configuration_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pix2Struct model configuration""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class Pix2StructTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50244): Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Pix2StructTextModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Dimensionality of the key, query, value projections in each attention head. d_ff (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. layer_norm_epsilon (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string). decoder_start_token_id (`int`, *optional*, defaults to 0): The id of the `decoder_start_token_id` token. use_cache (`bool`, *optional*, defaults to `False`): Whether or not the model should return the last key/values attentions (not used by all models). pad_token_id (`int`, *optional*, defaults to 0): The id of the `padding` token. 
eos_token_id (`int`, *optional*, defaults to 1): The id of the `end-of-sequence` token. Example: ```python >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructTextConfig() >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pix2struct_text_model" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.use_cache = use_cache self.eos_token_id = eos_token_id self.decoder_start_token_id = decoder_start_token_id # for backwards compatibility self.dense_act_fn = dense_act_fn super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, ) @classmethod def from_pretrained( cls, pretrainehidden_size_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrainehidden_size_name_or_path, **kwargs) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": config_dict = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Pix2StructVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the Pix2Struct-base [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. 
patch_embed_hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the input patch_embedding layer in the Transformer encoder. d_ff (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. d_kv (`int`, *optional*, defaults to 64): Dimensionality of the key, query, value projections per attention head. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. dropout_rate (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). seq_len (`int`, *optional*, defaults to 4096): Maximum sequence length (here number of patches) supported by the model. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance (in tokens) to use for each attention layer. 
Example: ```python >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructVisionConfig() >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pix2struct_vision_model" def __init__( self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.patch_embed_hidden_size = patch_embed_hidden_size self.d_ff = d_ff self.dropout_rate = dropout_rate self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.dense_act_fn = dense_act_fn self.seq_len = seq_len self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.d_kv = d_kv @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type") == "pix2struct": config_dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class Pix2StructConfig(PretrainedConfig): r""" [`Pix2StructConfig`] is the configuration class to store the configuration of a [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Pix2StructTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`]. initializer_factor (`float`, *optional*, defaults to 1.0): Factor to multiply the initialization range with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_vqa (`bool`, *optional*, defaults to `False`): Whether the model has been fine-tuned for VQA or not. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration >>> configuration = Pix2StructConfig() >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration >>> model = Pix2StructForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration >>> config_text = Pix2StructTextConfig() >>> config_vision = Pix2StructVisionConfig() >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = "pix2struct" def __init__( self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ): super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs) if text_config is None: text_config = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.") if vision_config is None: vision_config = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.") self.text_config = Pix2StructTextConfig(**text_config) self.vision_config = Pix2StructVisionConfig(**vision_config) self.decoder_start_token_id = self.text_config.decoder_start_token_id self.pad_token_id = self.text_config.pad_token_id self.eos_token_id = self.text_config.eos_token_id self.initializer_factor = initializer_factor self.initializer_range = initializer_range self.text_config.initializer_range = self.initializer_range self.vision_config.initializer_range = self.initializer_range self.is_vqa = is_vqa @classmethod def from_text_vision_configs( cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs ): r""" Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct vision model configuration. Returns: [`Pix2StructConfig`]: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
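Taken together, the three configuration classes above compose as follows. This is a minimal, illustrative sketch rather than library source: it only uses constructors and class methods defined in this file, and the final `from_pretrained` call assumes the public `google/pix2struct-base` checkpoint is reachable.

```python
from transformers import (
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)

# Build the composite config from explicitly constructed sub-configs.
text_config = Pix2StructTextConfig(num_layers=12, num_heads=12)
vision_config = Pix2StructVisionConfig(num_hidden_layers=12, num_attention_heads=12)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config, is_vqa=False)

# Instantiate a randomly initialized model from the composite config.
model = Pix2StructForConditionalGeneration(config)

# The overridden `from_pretrained` on the sub-config classes extracts the nested
# "text_config" / "vision_config" dict when pointed at a full pix2struct repo.
text_only = Pix2StructTextConfig.from_pretrained("google/pix2struct-base")
```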
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import torch from flax.traverse_util import flatten_dict from t5x import checkpoints from transformers import ( AutoTokenizer, Pix2StructConfig, Pix2StructForConditionalGeneration, Pix2StructImageProcessor, Pix2StructProcessor, Pix2StructTextConfig, Pix2StructVisionConfig, ) def get_flax_param(t5x_checkpoint_path): flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) flax_params = flatten_dict(flax_params) return flax_params def rename_and_convert_flax_params(flax_dict): converted_dict = {} CONVERSION_MAPPING = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } DECODER_CONVERSION_MAPPING = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key new_key = ".".join(key[1:]) # rename the key for old, new in CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): new_key = new_key.replace(old, new) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) new_key = new_key.replace("encoder", "encoder.encoder") elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key) converted_dict[new_key] = flax_dict[key] converted_torch_dict = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T) else: converted_torch_dict[key] = torch.from_numpy(converted_dict[key]) return converted_torch_dict def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False ): flax_params = get_flax_param(t5x_checkpoint_path) if not use_large: encoder_config = Pix2StructVisionConfig() decoder_config = Pix2StructTextConfig() else: encoder_config = 
Pix2StructVisionConfig( hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 ) decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18) config = Pix2StructConfig( vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa ) model = Pix2StructForConditionalGeneration(config) torch_params = rename_and_convert_flax_params(flax_params) model.load_state_dict(torch_params) tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer") image_processor = Pix2StructImageProcessor() processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok) if use_large: processor.image_processor.max_patches = 4096 processor.image_processor.is_vqa = True # mkdir if needed os.makedirs(pytorch_dump_folder_path, exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) print("Model saved in {}".format(pytorch_dump_folder_path)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--use_large", action="store_true", help="Use large model.") parser.add_argument("--is_vqa", action="store_true", help="Whether the original checkpoint was fine-tuned for VQA.") args = parser.parse_args() convert_pix2struct_original_pytorch_checkpoint_to_hf( args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa )
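For reference, the conversion entry point can also be driven directly from Python instead of the CLI. This is a minimal sketch under stated assumptions: the two paths are placeholders, the script's module is importable from the working directory, and `t5x`/`flax` are installed because `get_flax_param` loads the original checkpoint via `checkpoints.load_t5x_checkpoint`.

```python
# Hypothetical driver script; substitute a real T5x checkpoint directory and output folder.
from convert_pix2struct_original_pytorch_to_hf import (
    convert_pix2struct_original_pytorch_checkpoint_to_hf,
)

convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path="/path/to/t5x_checkpoint",      # placeholder
    pytorch_dump_folder_path="./pix2struct-converted",  # placeholder
    use_large=False,                                    # set True for the large variant
    is_vqa=False,                                       # set True for VQA-finetuned checkpoints
)
```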
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/processing_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Pix2Struct. """ from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class Pix2StructProcessor(ProcessorMixin): r""" Constructs a PIX2STRUCT processor which wraps a T5 tokenizer and PIX2STRUCT image processor into a single processor. [`Pix2StructProcessor`] offers all the functionalities of [`Pix2StructImageProcessor`] and [`T5TokenizerFast`]. See the docstring of [`~Pix2StructProcessor.__call__`] and [`~Pix2StructProcessor.decode`] for more information. Args: image_processor (`Pix2StructImageProcessor`): An instance of [`Pix2StructImageProcessor`]. The image processor is a required input. tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]): An instance of [`T5TokenizerFast`] or [`T5Tokenizer`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "Pix2StructImageProcessor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") def __init__(self, image_processor, tokenizer): tokenizer.return_token_type_ids = False super().__init__(image_processor, tokenizer) def __call__( self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method uses the [`Pix2StructImageProcessor.preprocess`] method to prepare image(s) for the model, and [`T5TokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information.
""" if images is None and text is None: raise ValueError("You have to specify either images or text.") # Get only text if images is None and not self.image_processor.is_vqa: self.current_processor = self.tokenizer text_encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values encoding_image_processor = self.image_processor( images, return_tensors=return_tensors, max_patches=max_patches, **kwargs ) else: # add pixel_values and bbox encoding_image_processor = self.image_processor( images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs ) if text is not None and not self.image_processor.is_vqa: text_encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) if "attention_mask" in text_encoding: text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask") if "input_ids" in text_encoding: text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids") else: text_encoding = None if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Pix2StructTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Pix2StructTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_pix2struct": [ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], "processing_pix2struct": ["Pix2StructProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_pix2struct"] = [ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", "Pix2StructTextModel", ] if TYPE_CHECKING: from .configuration_pix2struct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig, ) from .processing_pix2struct import Pix2StructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pix2struct import Pix2StructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pix2struct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, Pix2StructVisionModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
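Because the lazy module above registers every public Pix2Struct symbol on the top-level package, downstream code imports them directly from `transformers`. A small sketch, assuming `torch` and `Pillow` are installed so the optional-dependency branches resolve:

```python
# These names resolve to the submodules defined above only on first access.
from transformers import (
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
)

config = Pix2StructConfig()
model = Pix2StructForConditionalGeneration(config)  # randomly initialized weights
```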
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/modeling_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pix2Struct modeling file""" import math from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "Pix2StructConfig" from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct class Pix2StructLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm Pix2StructLayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm") except ImportError: # using the normal Pix2StructLayerNorm pass except Exception: logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm") pass ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm) class Pix2StructVisionEmbeddings(nn.Module): r""" Construct the embeddings from patch. In `Pix2Struct` the input is different from classic Vision-transformer models. Here the input is a sequence of `seq_len` flattened patches that also combines padding patches (tokens). Each patch is represented by a vector of `hidden_size` values. 
""" def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size) self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size) self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor: # the row and column indices are stored in the first and second position of the flattened_patches # flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2 row_indices = flattened_patches[:, :, 0].long() col_indices = flattened_patches[:, :, 1].long() flattened_patches = flattened_patches[:, :, 2:] embeddings = self.patch_projection(flattened_patches) row_embeddings = self.row_embedder(row_indices) col_embeddings = self.column_embedder(col_indices) # sum all embeddings together embeddings = embeddings + row_embeddings + col_embeddings embeddings = self.dropout(embeddings) return embeddings class Pix2StructVisionAttention(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.key_value_proj_dim = config.d_kv self.n_heads = config.num_attention_heads self.dropout = config.attention_dropout self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False) self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): """ Self-attention block """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] def to_projection_shape(states): """projection""" return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) # get query states # (batch_size, n_heads, seq_length, dim_per_head) query_states = to_projection_shape(self.query(hidden_states)) # get key/value states key_states = to_projection_shape(self.key(hidden_states)) value_states = to_projection_shape(self.value(hidden_states)) # compute scores # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: position_bias = torch.zeros( (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype) if attention_mask.dim() == 2: position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device) else: # (batch_size, n_heads, seq_length, key_length) position_bias = position_bias + attention_mask.to(position_bias.device) position_bias = 1 - position_bias position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min) scores += position_bias_masked scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min)) # 
(batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) # (batch_size, seq_length, dim) attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) attn_output = self.output(attn_output) outputs = (attn_output,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate class Pix2StructVisionMlp(nn.Module): def __init__(self, config: Pix2StructVisionConfig): super().__init__() self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. # See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class Pix2StructVisionLayer(nn.Module): def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Pix2StructVisionAttention(config) self.mlp = Pix2StructVisionMlp(config) self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: residual = hidden_states # in Pix2StructVision, layernorm is applied before self-attention hidden_states = self.pre_attention_layer_norm(hidden_states) self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + residual # in Pix2StructVision, layernorm is also applied after self-attention layer_output = self.pre_mlp_layer_norm(hidden_states) layer_output = self.mlp(layer_output) + hidden_states # second residual connection outputs = (layer_output,) + outputs return outputs class 
Pix2StructVisionEncoder(nn.Module): def __init__(self, config: Pix2StructConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Pix2StructPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Pix2StructConfig @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, Pix2StructLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, Pix2StructTextDenseGatedActDense): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pix2StructTextAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) key_value_proj_dim = ( self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else 
self.config.hidden_size ) n_heads = ( self.config.text_config.num_heads if isinstance(self.config, Pix2StructConfig) else self.config.num_heads ) module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5)) module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5)) module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5)) module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) elif isinstance(module, nn.Embedding): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, Pix2StructTextModel): hidden_size = ( self.config.text_config.hidden_size if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size ) module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5)) elif isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, Pix2StructLayerNorm): if module.weight is not None: module.weight.data.fill_(1.0) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. " "See Pix2Struct docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids PIX2STRUCT_VISION_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PIX2STRUCT_VISION_INPUTS_DOCSTRING = r""" Args: flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`): Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See [`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.", PIX2STRUCT_VISION_START_DOCSTRING, ) class Pix2StructVisionModel(Pix2StructPreTrainedModel): config_class = Pix2StructVisionConfig main_input_name = "flattened_patches" supports_gradient_checkpointing = True _no_split_modules = ["Pix2StructVisionLayer"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.config = config self.embeddings = Pix2StructVisionEmbeddings(config) self.encoder = Pix2StructVisionEncoder(config) self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_projection def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Example: ```python >>> import requests >>> from PIL import Image >>> from transformers import AutoProcessor, Pix2StructVisionModel >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 2048, 768] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if flattened_patches is None: raise ValueError("You have to specify flattened_patches") if attention_mask is None: # check where `flattened_patches` is not 0 attention_mask = (flattened_patches.sum(dim=-1) != 0).float() # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(flattened_patches) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) if not return_dict: head_outputs = (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size class Pix2StructTextDenseGatedActDense(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class Pix2StructTextLayerFF(nn.Module): def __init__(self, config: Pix2StructTextConfig): super().__init__() self.DenseReluDense = Pix2StructTextDenseGatedActDense(config) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class Pix2StructTextAttention(nn.Module): def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False): super().__init__() self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.hidden_size = config.hidden_size self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=False, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def to_projection_shape(states): """projection""" return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = to_projection_shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states # (batch_size, n_heads, seq_length, dim_per_head) query_states = to_projection_shape(self.query(hidden_states)) # get key/value states key_states = project( hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) # (batch_size, seq_length, dim) attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) attn_output = self.output(attn_output) present_key_value_state = (key_states, value_states) if use_cache else None outputs = 
(attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size class Pix2StructTextLayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False) self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class Pix2StructTextBlock(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.self_attention = Pix2StructTextLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config) self.mlp = Pix2StructTextLayerFF(config) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.self_attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.encoder_decoder_attention( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.mlp(hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs PIX2STRUCT_START_DOCSTRING = r""" The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder decoder transformer pre-trained in a image-to-text setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pix2StructText Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ PIX2STRUCT_INPUTS_DOCSTRING = r""" Args: flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`): Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` = `num_channels` * `patch_size` * `patch_size` The process of flattening the pixel patches is done by `Pix2StructProcessor`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText Training](./t5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss for the decoder. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The standalone text decoder of Pix2Struct", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructTextModel(Pix2StructPreTrainedModel): config_class = Pix2StructTextConfig _no_split_modules = ["Pix2StructTextBlock"] _tied_weights_keys = ["lm_head.weight"] supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size) self.layer = nn.ModuleList( [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]: r""" Returns: Example: ```python >>> from transformers import 
AutoProcessor, Pix2StructTextModel >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ``` """ use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.layer) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = 
self.dropout(hidden_states) logits = self.lm_head(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean") loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1)) if not return_dict: return tuple( v for v in [ loss, logits, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.", PIX2STRUCT_START_DOCSTRING, ) class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel): config_class = Pix2StructConfig main_input_name = "flattened_patches" _tied_weights_keys = ["decoder.lm_head.weight"] def __init__(self, config: Pix2StructConfig): super().__init__(config) self.encoder = Pix2StructVisionModel(config.vision_config) self.decoder = Pix2StructTextModel(config.text_config) self.is_vqa = config.is_vqa # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.decoder.set_output_embeddings(new_embeddings) def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: model_embeds = self.decoder.resize_token_embeddings(new_num_tokens) # update vocab size self.config.text_config.vocab_size = new_num_tokens return model_embeds def get_decoder(self): return self.decoder def get_encoder(self): return self.encoder @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: Inference: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base") >>> url = 
"https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> # autoregressive generation >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A stop sign is on a street corner. >>> # conditional generation >>> text = "A picture of" >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False) >>> generated_ids = model.generate(**inputs, max_new_tokens=50) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A picture of a stop sign with a red stop sign ``` Training: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base") >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base") >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "A stop sign is on the street corner." >>> inputs = processor(images=image, return_tensors="pt") >>> labels = processor(text=text, return_tensors="pt").input_ids >>> # forward pass >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> print(f"{loss.item():.5f}") 5.94282 ```""" use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( flattened_patches=flattened_patches, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) decoder_attention_mask = ( decoder_attention_mask if decoder_attention_mask is not None else decoder_input_ids.ne(self.config.pad_token_id).float() ) # Always attend to the first token decoder_attention_mask[:, 0] = 1 # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=decoder_outputs.loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, flattened_patches: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): if decoder_attention_mask is None: decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "flattened_patches": flattened_patches, "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, }
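As a rough illustration of the caching path in `prepare_inputs_for_generation` above, the following standalone sketch (with a made-up 4-token decoder sequence and cache length) shows which `decoder_input_ids` are kept once `past_key_values` exist: only the tokens that do not yet have cached key/value states are re-fed to the decoder.

```python
import torch

# Hypothetical decoder sequence of 4 tokens; assume 3 of them already have cached key/values.
input_ids = torch.tensor([[0, 17, 42, 8]])
past_length = 3  # in the model this would be past_key_values[0][0].shape[2]

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # some generation methods already pass only the last input id
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])  # tensor([[8]]) -- only the new token runs through the decoder
```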
0
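The default `decoder_attention_mask` built in `Pix2StructForConditionalGeneration.forward` (when only `labels` are supplied) can be reproduced in isolation. The sketch below uses toy token ids and assumes a pad token id of 0; the model reads the actual value from its configuration.

```python
import torch

pad_token_id = 0  # assumed for illustration; taken from the config in the model
decoder_input_ids = torch.tensor([[0, 37, 18, 0, 0]])  # right-padded toy sequence

decoder_attention_mask = decoder_input_ids.ne(pad_token_id).float()
decoder_attention_mask[:, 0] = 1  # first position is always attended to, even though it is the pad/start token
print(decoder_attention_mask)  # tensor([[1., 1., 1., 0., 0.]])
```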
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Pix2Struct.""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch logger = logging.get_logger(__name__) DEFAULT_FONT_PATH = "ybelkada/fonts" # adapted from: https://discuss.pytorch.org/t/tf-image-extract-patches-in-pytorch/171409/2 def torch_extract_patches(image_tensor, patch_height, patch_width): """ Utiliy function to extract patches from a given image tensor. Returns a tensor of shape (1, `patch_height`, `patch_width`, `num_channels`x `patch_height` x `patch_width`) Args: image_tensor (torch.Tensor): The image tensor to extract patches from. patch_height (int): The height of the patches to extract. patch_width (int): The width of the patches to extract. """ requires_backends(torch_extract_patches, ["torch"]) image_tensor = image_tensor.unsqueeze(0) patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)) patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1) patches = patches.permute(0, 4, 2, 3, 1).reshape( image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, ) return patches.unsqueeze(0) # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L106 def render_text( text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None, ) -> Image.Image: """ Render text. This script is entirely adapted from the original script that can be found here: https://github.com/google-research/pix2struct/blob/main/pix2struct/preprocessing/preprocessing_utils.py Args: text (`str`, *optional*, defaults to ): Text to render. text_size (`int`, *optional*, defaults to 36): Size of the text. text_color (`str`, *optional*, defaults to `"black"`): Color of the text. background_color (`str`, *optional*, defaults to `"white"`): Color of the background. left_padding (`int`, *optional*, defaults to 5): Padding on the left. 
right_padding (`int`, *optional*, defaults to 5): Padding on the right. top_padding (`int`, *optional*, defaults to 5): Padding on the top. bottom_padding (`int`, *optional*, defaults to 5): Padding on the bottom. font_bytes (`bytes`, *optional*): Bytes of the font to use. If `None`, the default font will be used. font_path (`str`, *optional*): Path to the font to use. If `None`, the default font will be used. """ requires_backends(render_text, "vision") # Add new lines so that each line is no more than 80 characters. wrapper = textwrap.TextWrapper(width=80) lines = wrapper.wrap(text=text) wrapped_text = "\n".join(lines) if font_bytes is not None and font_path is None: font = io.BytesIO(font_bytes) elif font_path is not None: font = font_path else: font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF") font = ImageFont.truetype(font, encoding="UTF-8", size=text_size) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color)) _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font) # Create the actual image with a bit of padding around the text. image_width = text_width + left_padding + right_padding image_height = text_height + top_padding + bottom_padding image = Image.new("RGB", (image_width, image_height), background_color) draw = ImageDraw.Draw(image) draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font) return image # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L87 def render_header( image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ): """ Renders the input text as a header on the input image. Args: image (`np.ndarray`): The image to render the header on. header (`str`): The header text. input_data_format (`Union[ChannelDimension, str]`, *optional*): The data format of the image. Can be either "ChannelDimension.channels_first" or "ChannelDimension.channels_last". Returns: `np.ndarray`: The image with the header rendered. """ requires_backends(render_header, "vision") # Convert to PIL image if necessary image = to_pil_image(image, input_data_format=input_data_format) header_image = render_text(header, **kwargs) new_width = max(header_image.width, image.width) new_height = int(image.height * (new_width / image.width)) new_header_height = int(header_image.height * (new_width / header_image.width)) new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white") new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0)) new_image.paste(image.resize((new_width, new_height)), (0, new_header_height)) # Convert back to the original framework if necessary new_image = to_numpy_array(new_image) if infer_channel_dimension_format(new_image) == ChannelDimension.LAST: new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST) return new_image class Pix2StructImageProcessor(BaseImageProcessor): r""" Constructs a Pix2Struct image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard deviation.
patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`): The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16. max_patches (`int`, *optional*, defaults to 2048): The maximum number of patches to extract from the image as per the [Pix2Struct paper](https://arxiv.org/pdf/2210.03347.pdf). is_vqa (`bool`, *optional*, defaults to `False`): Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is rendered onto the input images. """ model_input_names = ["flattened_patches"] def __init__( self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs, ) -> None: super().__init__(**kwargs) self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} self.do_normalize = do_normalize self.do_convert_rgb = do_convert_rgb self.max_patches = max_patches self.is_vqa = is_vqa def extract_flattened_patches( self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Extract flattened patches from an image. Args: image (`np.ndarray`): Image to extract flattened patches from. max_patches (`int`): Maximum number of patches to extract. patch_size (`dict`): Dictionary containing the patch height and width. Returns: result (`np.ndarray`): A sequence of `max_patches` flattened patches. """ requires_backends(self.extract_flattened_patches, "torch") # convert to torch image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) image = torch.from_numpy(image) patch_height, patch_width = patch_size["height"], patch_size["width"] image_height, image_width = get_image_size(image, ChannelDimension.FIRST) # maximize scale s.t. scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1) num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1) resized_height = max(num_feasible_rows * patch_height, 1) resized_width = max(num_feasible_cols * patch_width, 1) image = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] patches = torch_extract_patches(image, patch_height, patch_width) patches_shape = patches.shape rows = patches_shape[1] columns = patches_shape[2] depth = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] patches = patches.reshape([rows * columns, depth]) # [rows * columns, 1] row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1]) col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] row_ids = row_ids.to(torch.float32) col_ids = col_ids.to(torch.float32) # [rows * columns, 2 + patch_height * patch_width * image_channels] result = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float() result = to_numpy_array(result) return result def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. The image std is to mimic the tensorflow implementation of the `per_image_standardization`: https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if image.dtype == np.uint8: image = image.astype(np.float32) # take mean across the whole `image` mean = np.mean(image) std = np.std(image) adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape))) return normalize( image, mean=mean, std=adjusted_stddev, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> ImageInput: """ Preprocess an image or batch of images. The processor first computes the maximum possible number of aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the images are standardized following the tensorflow implementation of `per_image_standardization` (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization). Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images. header_text (`Union[List[str], str]`, *optional*): Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. max_patches (`int`, *optional*, defaults to `self.max_patches`): Maximum number of patches to extract. patch_size (`dict`, *optional*, defaults to `self.patch_size`): Dictionary containing the patch height and width. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb patch_size = patch_size if patch_size is not None else self.patch_size max_patches = max_patches if max_patches is not None else self.max_patches is_vqa = self.is_vqa if kwargs.get("data_format", None) is not None: raise ValueError("data_format is not an accepted input as the outputs are ") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models.") font_bytes = kwargs.pop("font_bytes", None) font_path = kwargs.pop("font_path", None) if isinstance(header_text, str): header_text = [header_text] * len(images) images = [ render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path) for i, image in enumerate(images) ] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] # convert to torch tensor and permute images = [ self.extract_flattened_patches( image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format ) for image in images ] # create attention mask in numpy attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images] encoded_outputs = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors ) return encoded_outputs
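The resizing arithmetic in `extract_flattened_patches` can be checked on its own: the image is rescaled so that as many aspect-ratio-preserving `patch_size` patches as possible fit under the `max_patches` budget. The 400x600 input below is an arbitrary example, not a value used by the processor.

```python
import math

# Hypothetical image and the processor defaults (16x16 patches, 2048-patch budget).
image_height, image_width = 400, 600
patch_height = patch_width = 16
max_patches = 2048

scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)

resized_height = max(num_feasible_rows * patch_height, 1)
resized_width = max(num_feasible_cols * patch_width, 1)

# rows * cols stays within max_patches and the aspect ratio is roughly preserved.
print(num_feasible_rows, num_feasible_cols, resized_height, resized_width)  # 36 55 576 880
print(num_feasible_rows * num_feasible_cols <= max_patches)  # True
```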
0
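For reference, a self-contained sketch of the `per_image_standardization`-style normalization performed by `Pix2StructImageProcessor.normalize` above: each image is centered with its own mean and divided by its own (floored) standard deviation. The random 3x4x4 array is purely illustrative.

```python
import math

import numpy as np

# Hypothetical float image in channels-first layout.
image = np.random.rand(3, 4, 4).astype(np.float32)

mean = np.mean(image)
std = np.std(image)
# Floor the std so constant images do not divide by (near) zero.
adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

normalized = (image - mean) / adjusted_stddev
print(float(normalized.mean()))  # ~0.0
```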
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neox/modeling_gpt_neox.py
# coding=utf-8 # Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GPTNeoX model.""" from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn import functional as F from ...activations import ACT2FN from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging from .configuration_gpt_neox import GPTNeoXConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "trl-internal-testing/tiny-random-GPTNeoXForCausalLM" _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neox-20b" _CONFIG_FOR_DOC = "GPTNeoXConfig" from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) class GPTNeoXPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GPTNeoXConfig base_model_prefix = "gpt_neox" supports_gradient_checkpointing = True _no_split_modules = ["GPTNeoXLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class GPTNeoXAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them" ) self.head_size = self.hidden_size // self.num_attention_heads self.rotary_ndims = int(self.head_size * config.rotary_pct) self._init_bias(config.max_position_embeddings) self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) self._init_rope() self.norm_factor = self.head_size**-0.5 self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias) self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.attention_dropout = nn.Dropout(config.attention_dropout) self.is_causal = True def _init_bias(self, max_positions, device=None): self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), persistent=False, ) if device is not None: self.bias = self.bias.to(device) def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = GPTNeoXRotaryEmbedding( self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base ) else: scaling_type = self.config.rope_scaling["type"] scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding( self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base, scaling_factor=scaling_factor, ) elif scaling_type == "dynamic": self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding( self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base, scaling_factor=scaling_factor, ) else: raise ValueError(f"Unknown RoPE scaling type {scaling_type}") def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, padding_mask: Optional[torch.Tensor] = None, ): has_layer_past = layer_past is not None # Compute QKV # Attention heads [batch, seq_len, hidden_size] # --> [batch, seq_len, (np * 3 * head_size)] qkv = self.query_key_value(hidden_states) # [batch, seq_len, (num_heads * 3 * head_size)] # --> [batch, seq_len, num_heads, 3 * head_size] new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, 
num_attention_heads, seq_len, head_size] query = qkv[..., : self.head_size].permute(0, 2, 1, 3) key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3) # Compute rotary embeddings on rotary_ndims query_rot = query[..., : self.rotary_ndims] query_pass = query[..., self.rotary_ndims :] key_rot = key[..., : self.rotary_ndims] key_pass = key[..., self.rotary_ndims :] # Compute token offset for rotary embeddings (when decoding) seq_len = key.shape[-2] if has_layer_past: seq_len += layer_past[0].shape[-2] cos, sin = self.rotary_emb(value, seq_len=seq_len) query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) # Cache QKV values if has_layer_past: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = (key, value) if use_cache else None # Compute attention attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) # Reshape outputs attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): """ Splits hidden dim into attn_head_size and num_attention_heads """ # tensor: [bs, seq_len, hidden_size] new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view(new_shape) # -> [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden dim """ # tensor [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3).contiguous() # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) # -> [bs, seq_len, hidden_size] return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size] # compute causal mask from causal mask buffer batch_size, num_attention_heads, query_length, attn_head_size = query.size() key_length = key.size(-2) # dynamically increase the causal mask with the key length, if needed. if key_length > self.bias.shape[-1]: self._init_bias(key_length, device=key.device) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) attn_scores = torch.zeros( batch_size * num_attention_heads, query_length, key_length, dtype=query.dtype, device=key.device, ) attn_scores = torch.baddbmm( attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=self.norm_factor, ) attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length) mask_value = torch.finfo(attn_scores.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device) attn_scores = torch.where(causal_mask, attn_scores, mask_value) if attention_mask is not None: # Apply the attention mask attn_scores = attn_scores + attention_mask attn_weights = nn.functional.softmax(attn_scores, dim=-1) attn_weights = attn_weights.to(value.dtype) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_weights = self.attention_dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights class GPTNeoXFlashAttention2(GPTNeoXAttention): """ GPTNeoX flash attention module. This module inherits from `GPTNeoXAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): has_layer_past = layer_past is not None # Compute QKV # Attention heads [batch, seq_len, hidden_size] # --> [batch, seq_len, (np * 3 * head_size)] qkv = self.query_key_value(hidden_states) # [batch, seq_len, (num_heads * 3 * head_size)] # --> [batch, seq_len, num_heads, 3 * head_size] new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] query = qkv[..., : self.head_size].permute(0, 2, 1, 3) key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3) query_length = query.shape[-2] # Compute rotary embeddings on rotary_ndims query_rot = query[..., : self.rotary_ndims] query_pass = query[..., self.rotary_ndims :] key_rot = key[..., : self.rotary_ndims] key_pass = key[..., self.rotary_ndims :] # Compute token offset for rotary embeddings (when decoding) seq_len = key.shape[-2] if has_layer_past: seq_len += layer_past[0].shape[-2] cos, sin = self.rotary_emb(value, seq_len=seq_len) query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) # Cache QKV values if has_layer_past: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = (key, value) if use_cache else None # GPT-neo-X casts query and key in fp32 to apply 
rotary embedding in full precision target_dtype = value.dtype if query.dtype != target_dtype: query = query.to(target_dtype) if key.dtype != target_dtype: key = key.to(target_dtype) # Permute to get the expected shape for Flash Attention query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 / bfloat16 just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms input_dtype = query.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.query_key_value.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attention_dropout = self.config.attention_dropout if self.training else 0.0 # Compute attention attn_weights = self._flash_attention_forward( query, key, value, attention_mask, query_length, dropout=attention_dropout, softmax_scale=self.norm_factor ) # Reshape outputs attn_output = attn_weights.reshape( attn_weights.shape[0], attn_weights.shape[1], self.num_attention_heads * self.head_size ) attn_output = self.dense(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`float`): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) def attention_mask_func(attention_scores, ltor_mask): attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min) return attention_scores class GPTNeoXRotaryEmbedding(nn.Module): # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding.__init__ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos(), persistent=False) self.register_buffer("sin_cached", emb.sin(), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len], self.sin_cached[:seq_len], ) # copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding.__init__ # TODO @gante bring compatibility back class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): """GPTNeoXRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) t = t / self.scaling_factor freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos(), persistent=False) self.register_buffer("sin_cached", emb.sin(), persistent=False) class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): """GPTNeoXRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" # copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding.__init__ # TODO @gante no longer copied from def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos(), persistent=False) self.register_buffer("sin_cached", emb.sin(), persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class GPTNeoXMLP(nn.Module): def __init__(self, config): super().__init__() self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size) self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states GPT_NEOX_ATTENTION_CLASSES = { "eager": GPTNeoXAttention, "flash_attention_2": GPTNeoXFlashAttention2, } class GPTNeoXLayer(nn.Module): def __init__(self, config): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_dropout = nn.Dropout(config.hidden_dropout) self.post_mlp_dropout = nn.Dropout(config.hidden_dropout) self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config) self.mlp = GPTNeoXMLP(config) def forward( self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, layer_past: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, ): attention_layer_outputs = self.attention( self.input_layernorm(hidden_states), attention_mask=attention_mask, position_ids=position_ids, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights) attn_output = self.post_attention_dropout(attn_output) outputs = attention_layer_outputs[1:] if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output + hidden_states else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) attn_output = attn_output + hidden_states mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output if use_cache: outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights) else: outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights) return outputs GPT_NEOX_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT_NEOX_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.", GPT_NEOX_START_DOCSTRING, ) class GPTNeoXModel(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) self.emb_dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([GPTNeoXLayer(config) for _ in range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_in def set_input_embeddings(self, value): self.embed_in = value @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple 
having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_length = 0 past_key_values = tuple([None] * self.config.num_hidden_layers) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # Attention mask. if attention_mask is not None: assert batch_size > 0, "batch_size has to be defined and > 0" attention_mask = attention_mask.view(batch_size, -1) if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
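# Illustrative note (added): a 2D padding-mask row such as [1, 1, 0] becomes
# [0.0, 0.0, torch.finfo(self.dtype).min] after the two lines below, so padded
# positions receive a very large negative bias before the softmax.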
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) hidden_states = self.emb_dropout(inputs_embeds) if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, position_ids, head_mask[i], use_cache, None, output_attentions, ) else: outputs = layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask[i], layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.final_layer_norm(hidden_states) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, ) @add_start_docstrings( """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING ) class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel): _tied_weights_keys = ["embed_out.weight"] def __init__(self, config): super().__init__(config) self.gpt_neox = GPTNeoXModel(config) self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.embed_out def set_output_embeddings(self, new_embeddings): self.embed_out = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" past_key_values 
(`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b") >>> config.is_decoder = True >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] lm_logits = self.embed_out(hidden_states) lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # we are doing next-token prediction; shift prediction scores and input ids by one shift_logits = lm_logits[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithPast( loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): input_shape = input_ids.shape # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if 
input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "attention_mask": attention_mask, "past_key_values": past_key_values, "position_ids": position_ids, "use_cache": kwargs.get("use_cache"), } ) return model_inputs def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ The GPTNeoX Model transformer with a sequence classification head on top (linear layer). [`GPTNeoXForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT_NEOX_START_DOCSTRING, ) class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="LarsJonasson/pythia-410m-deduped-sft-swedish", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: 
Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, GPT_NEOX_START_DOCSTRING, ) class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
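# --- Illustrative sketch (added; not part of the original module): how the rotary
# embedding helpers defined above fit together. The shapes below are made up for
# demonstration only; inside the attention classes the same
# `q * cos + rotate_half(q) * sin` rotation is applied to the rotary slice of each head.
import torch

head_dim, seq_len = 8, 4
rotary = GPTNeoXRotaryEmbedding(dim=head_dim, max_position_embeddings=16)
q = torch.randn(1, 2, seq_len, head_dim)  # [batch, num_heads, seq_len, head_dim]
k = torch.randn(1, 2, seq_len, head_dim)
cos, sin = rotary(q, seq_len=seq_len)  # cached tables, each of shape [seq_len, head_dim]
position_ids = torch.arange(seq_len).unsqueeze(0)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert q_rot.shape == q.shape  # the rotation is position-wise; shapes are unchanged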
mavonic_private_repos/transformers/src/transformers/models/gpt_neox/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable _import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt_neox"] = [ "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXForCausalLM", "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", "GPTNeoXForTokenClassification", "GPTNeoXLayer", "GPTNeoXModel", "GPTNeoXPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
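# --- Illustrative usage sketch (added; not part of the original file): with the lazy
# module above, the public names resolve on first attribute access. Assuming torch is
# installed, a tiny randomly initialized model can be built like this:
from transformers.models.gpt_neox import GPTNeoXConfig, GPTNeoXForCausalLM

config = GPTNeoXConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256)
model = GPTNeoXForCausalLM(config)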
mavonic_private_repos/transformers/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py
# coding=utf-8 # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for GPTNeoX.""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} class GPTNeoXTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import GPTNeoXTokenizerFast >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("openai-community/gpt2") >>> tokenizer("Hello world")["input_ids"] [15496, 995] >>> tokenizer(" Hello world")["input_ids"] [18435, 995] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. unk_token (`str`, *optional*, defaults to `<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `<|endoftext|>`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `<|endoftext|>`): The end of sequence token. pad_token (`str`, *optional*): Token for padding a sequence. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (GPTNeoX tokenizer detect beginning of words by the preceding space). add_bos_token (`bool`, *optional*, defaults to `False`): Whether or not to add a `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. trim_offsets (`bool`, *optional*, defaults to `True`): Whether or not the post-processing step should trim offsets to avoid including whitespaces. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", pad_token=None, add_bos_token=False, add_eos_token=False, add_prefix_space=False, **kwargs, ): super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, add_prefix_space=add_prefix_space, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. """ bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) @property # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template def default_chat_template(self): """ A simple chat template that ignores role information and just concatenates messages with EOS tokens. """ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
mavonic_private_repos/transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py
# coding=utf-8 # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GPTNeoX model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class GPTNeoXConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTNeoX [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50432): Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTNeoXModel`]. hidden_size (`int`, *optional*, defaults to 6144): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 44): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 24576): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. rotary_pct (`float`, *optional*, defaults to 0.25): percentage of hidden dimensions to allocate to rotary embeddings rotary_emb_base (`int`, *optional*, defaults to 10000) base for computing rotary embeddings frequency attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio probability of the attention score. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp hidden states. classifier_dropout (`float`, *optional*, defaults to 0.1): Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`]. The dropout ratio for the hidden layer. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 1e-5): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. use_parallel_residual (`bool`, *optional*, defaults to `True`): Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training speedup at large scales (e.g. 20B). rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use a bias in the query, key, value and output projection layers during self-attention. Example: ```python >>> from transformers import GPTNeoXConfig, GPTNeoXModel >>> # Initializing a GPTNeoX gpt-neox-20b style configuration >>> configuration = GPTNeoXConfig() >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration >>> model = GPTNeoXModel(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ```""" model_type = "gpt_neox" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, attention_bias=True, **kwargs, ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.rotary_pct = rotary_pct self.rotary_emb_base = rotary_emb_base self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout self.classifier_dropout = classifier_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings self.use_parallel_residual = use_parallel_residual self.rope_scaling = rope_scaling self.attention_bias = attention_bias self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" ) # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. 
""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py
# coding=utf-8
# Copyright 2023 IBM and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PatchTSMixer model configuration"""

from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

from ..deprecated._archive_maps import PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class PatchTSMixerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a
    PatchTSMixer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the PatchTSMixer
    [ibm/patchtsmixer-etth1-pretrain](https://huggingface.co/ibm/patchtsmixer-etth1-pretrain) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        context_length (`int`, *optional*, defaults to 32):
            The context/history length for the input sequence.
        patch_length (`int`, *optional*, defaults to 8):
            The patch length for the input sequence.
        num_input_channels (`int`, *optional*, defaults to 1):
            Number of input variates. For a univariate time series, set it to 1.
        patch_stride (`int`, *optional*, defaults to 8):
            Determines the overlap between two consecutive patches. Set it to `patch_length` (or greater) for
            non-overlapping patches.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples to generate in parallel for probabilistic forecasting.
        d_model (`int`, *optional*, defaults to 8):
            Hidden dimension of the model. Recommended to set it as a multiple of `patch_length` (i.e. 2-5x
            `patch_length`). Larger values indicate a more complex model.
        expansion_factor (`int`, *optional*, defaults to 2):
            Expansion factor to use inside the MLP. Recommended range is 2-5. Larger values indicate a more complex
            model.
        num_layers (`int`, *optional*, defaults to 3):
            Number of layers to use. Recommended range is 3-15. Larger values indicate a more complex model.
        dropout (`float`, *optional*, defaults to 0.2):
            The dropout probability for the `PatchTSMixer` backbone. Recommended range is 0.2-0.7.
        mode (`str`, *optional*, defaults to `"common_channel"`):
            Mixer mode. Determines how to process the channels. Allowed values: "common_channel", "mix_channel". In
            "common_channel" mode, we follow channel-independent modelling with no explicit channel-mixing; channel
            mixing happens in an implicit manner via shared weights across channels (preferred first approach). In
            "mix_channel" mode, we follow explicit channel-mixing in addition to patch and feature mixing (preferred
            approach when channel correlations are very important to model).
        gated_attn (`bool`, *optional*, defaults to `True`):
            Enable gated attention.
        norm_mlp (`str`, *optional*, defaults to `"LayerNorm"`):
            Normalization layer (BatchNorm or LayerNorm).
        self_attn (`bool`, *optional*, defaults to `False`):
            Enable tiny self-attention across patches. This can be enabled when the output of vanilla PatchTSMixer
            with gated attention is not satisfactory. Enabling this leads to explicit pair-wise attention and
            modelling across patches.
        self_attn_heads (`int`, *optional*, defaults to 1):
            Number of self-attention heads. Works only when `self_attn` is set to `True`.
        use_positional_encoding (`bool`, *optional*, defaults to `False`):
            Enable the use of positional embedding for the tiny self-attention layers. Works only when `self_attn` is
            set to `True`.
        positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
            Positional encodings. Options `"random"` and `"sincos"` are supported. Works only when
            `use_positional_encoding` is set to `True`.
        scaling (`string` or `bool`, *optional*, defaults to `"std"`):
            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
            scaler is set to "mean".
        loss (`string`, *optional*, defaults to `"mse"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
            error "mse".
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        post_init (`bool`, *optional*, defaults to `False`):
            Whether to use custom weight initialization from the `transformers` library, or the default initialization
            in `PyTorch`. Setting it to `False` performs `PyTorch` weight initialization.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            A value added to the denominator for numerical stability of normalization.
        mask_type (`str`, *optional*, defaults to `"random"`):
            Type of masking to use for masked pretraining mode. Allowed values are "random", "forecast". In random
            masking, points are masked randomly. In forecast masking, points are masked towards the end.
        random_mask_ratio (`float`, *optional*, defaults to 0.5):
            Masking ratio to use when `mask_type` is `random`. Higher values indicate more masking.
        num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
            Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in
            the batch will have the same number of masked patches. If it is a list, samples in the batch will be
            randomly masked by numbers defined in the list. This argument is only used for forecast pretraining.
        mask_value (`float`, *optional*, defaults to `0.0`):
            Mask value to use.
        masked_loss (`bool`, *optional*, defaults to `True`):
            Whether to compute the pretraining loss only at the masked portions, or on the entire output.
        channel_consistent_masking (`bool`, *optional*, defaults to `True`):
            When true, masking will be the same across all channels of a timeseries. Otherwise, masking positions will
            vary across channels.
        unmasked_channel_indices (`list`, *optional*):
            Channels that are not masked during pretraining.
        head_dropout (`float`, *optional*, defaults to 0.2):
            The dropout probability for the `PatchTSMixer` head.
        distribution_output (`string`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
            "negative_binomial".
        prediction_length (`int`, *optional*, defaults to 16):
            Number of time steps to forecast for a forecasting task. Also known as the forecast horizon.
prediction_channel_indices (`list`, *optional*): List of channel indices to forecast. If None, forecast all channels. Target data is expected to have all channels and we explicitly filter the channels in prediction and target before loss computation. num_targets (`int`, *optional*, defaults to 3): Number of targets (dimensionality of the regressed variable) for a regression task. output_range (`list`, *optional*): Output range to restrict for the regression task. Defaults to None. head_aggregation (`str`, *optional*, defaults to `"max_pool"`): Aggregation mode to enable for classification or regression task. Allowed values are `None`, "use_last", "max_pool", "avg_pool". Example: ```python >>> from transformers import PatchTSMixerConfig, PatchTSMixerModel >>> # Initializing a default PatchTSMixer configuration >>> configuration = PatchTSMixerConfig() >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSMixerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtsmixer" attribute_map = { "hidden_size": "d_model", "num_hidden_layers": "num_layers", } def __init__( self, # Time series specific configuration context_length: int = 32, patch_length: int = 8, num_input_channels: int = 1, patch_stride: int = 8, num_parallel_samples: int = 100, # General model configuration d_model: int = 8, expansion_factor: int = 2, num_layers: int = 3, dropout: float = 0.2, mode: str = "common_channel", gated_attn: bool = True, norm_mlp: str = "LayerNorm", self_attn: bool = False, self_attn_heads: int = 1, use_positional_encoding: bool = False, positional_encoding_type: str = "sincos", scaling: Optional[Union[str, bool]] = "std", loss: str = "mse", init_std: float = 0.02, post_init: bool = False, norm_eps: float = 1e-5, # Pretrain model configuration mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], mask_value: int = 0, masked_loss: bool = True, channel_consistent_masking: bool = True, unmasked_channel_indices: Optional[List[int]] = None, # General head configuration head_dropout: float = 0.2, distribution_output: str = "student_t", # Prediction head configuration prediction_length: int = 16, prediction_channel_indices: list = None, # Classification/Regression configuration num_targets: int = 3, output_range: list = None, head_aggregation: str = "max_pool", **kwargs, ): self.num_input_channels = num_input_channels self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride self.d_model = d_model self.expansion_factor = expansion_factor self.num_layers = num_layers self.dropout = dropout self.mode = mode self.gated_attn = gated_attn self.norm_mlp = norm_mlp self.scaling = scaling self.head_dropout = head_dropout self.num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1 self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio self.num_forecast_mask_patches = num_forecast_mask_patches self.mask_value = mask_value self.channel_consistent_masking = channel_consistent_masking self.masked_loss = masked_loss self.patch_last = True self.use_positional_encoding = use_positional_encoding self.positional_encoding_type = positional_encoding_type self.prediction_length = prediction_length self.prediction_channel_indices = prediction_channel_indices self.num_targets = num_targets self.output_range = output_range self.head_aggregation = head_aggregation 
self.self_attn = self_attn self.self_attn_heads = self_attn_heads self.init_std = init_std self.post_init = post_init self.distribution_output = distribution_output self.loss = loss self.num_parallel_samples = num_parallel_samples self.unmasked_channel_indices = unmasked_channel_indices self.norm_eps = norm_eps super().__init__(**kwargs)
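Note that `num_patches` is not a constructor argument; it is derived inside `__init__` as `(max(context_length, patch_length) - patch_length) // patch_stride + 1`. A small sketch of how the defaults and an overlapping-stride variant play out (the values follow directly from that formula):

```python
from transformers import PatchTSMixerConfig

# Defaults: context_length=32, patch_length=8, patch_stride=8
# -> (32 - 8) // 8 + 1 = 4 non-overlapping patches.
config = PatchTSMixerConfig()
print(config.num_patches)  # 4

# A stride smaller than the patch length makes consecutive patches overlap:
# (32 - 8) // 4 + 1 = 7 patches.
overlapping = PatchTSMixerConfig(patch_stride=4)
print(overlapping.num_patches)  # 7
```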
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtsmixer/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_patchtsmixer": [ "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSMixerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_patchtsmixer"] = [ "PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSMixerPreTrainedModel", "PatchTSMixerModel", "PatchTSMixerForPretraining", "PatchTSMixerForPrediction", "PatchTSMixerForTimeSeriesClassification", "PatchTSMixerForRegression", ] if TYPE_CHECKING: from .configuration_patchtsmixer import ( PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSMixerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_patchtsmixer import ( PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST, PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForRegression, PatchTSMixerForTimeSeriesClassification, PatchTSMixerModel, PatchTSMixerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
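The lazy `__init__` above is what makes `PatchTSMixerConfig` and the task-specific classes importable from the top-level `transformers` namespace when torch is available. As a minimal smoke-test sketch (assuming a torch-enabled install; the expected shape follows from the configuration used here, i.e. `num_patches = 4` and `d_model = 8`), the randomly initialized backbone can be run on dummy data:

```python
import torch

from transformers import PatchTSMixerConfig, PatchTSMixerModel

# Randomly initialized backbone for a 3-channel series with the default patching setup.
config = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=8, num_input_channels=3)
model = PatchTSMixerModel(config)

past_values = torch.randn(2, config.context_length, config.num_input_channels)
outputs = model(past_values=past_values)

# Patched embeddings: [batch_size, num_input_channels, num_patches, d_model]
print(outputs.last_hidden_state.shape)  # torch.Size([2, 3, 4, 8])
```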
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
# coding=utf-8 # Copyright 2023 IBM and HuggingFace Inc. team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch PatchTSMixer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from transformers.modeling_utils import PreTrainedModel from transformers.utils import ModelOutput from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_patchtsmixer import PatchTSMixerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "PatchTSMixerConfig" from ..deprecated._archive_maps import PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 PATCHTSMIXER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PatchTSMixerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. mask_input (`bool`, *optional*, defaults to `False`): If True, Masking will be enabled. False otherwise. """ PATCHTSMIXER_INPUTS_DOCSTRING = r""" Args: past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class PatchTSMixerGatedAttention(nn.Module): """ Module that applies gated attention to input data. Args: in_size (`int`): The input size. out_size (`int`): The output size. 
""" def __init__(self, in_size: int, out_size: int): super().__init__() self.attn_layer = nn.Linear(in_size, out_size) self.attn_softmax = nn.Softmax(dim=-1) def forward(self, inputs): attn_weight = self.attn_softmax(self.attn_layer(inputs)) inputs = inputs * attn_weight return inputs # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTBatchNorm with PatchTST->PatchTSMixer class PatchTSMixerBatchNorm(nn.Module): """ Compute batch normalization over the sequence length (time) dimension. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) def forward(self, inputs: torch.Tensor): """ Parameters: inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): input for Batch norm calculation Returns: `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` """ output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) output = self.batchnorm(output) return output.transpose(1, 2) class PatchTSMixerPositionalEncoding(nn.Module): """ Class for positional encoding """ def __init__(self, config: PatchTSMixerConfig): super().__init__() # positional encoding: [num_patches x d_model] if config.use_positional_encoding: self.position_enc = self._init_pe(config) else: self.position_enc = nn.Parameter(torch.zeros(config.num_patches, config.d_model)) @staticmethod def _init_pe(config: PatchTSMixerConfig) -> nn.Parameter: # Positional encoding if config.positional_encoding_type == "random": position_enc = nn.Parameter(torch.randn(config.num_patches, config.d_model), requires_grad=True) elif config.positional_encoding_type == "sincos": position_enc = torch.zeros(config.num_patches, config.d_model) position = torch.arange(0, config.num_patches).unsqueeze(1) div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model)) position_enc[:, 0::2] = torch.sin(position * div_term) position_enc[:, 1::2] = torch.cos(position * div_term) position_enc = position_enc - position_enc.mean() position_enc = position_enc / (position_enc.std() * 10) position_enc = nn.Parameter(position_enc, requires_grad=False) else: raise ValueError( f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'." ) return position_enc def forward(self, patch_input: torch.Tensor): # hidden_state: [bs x num_channels x num_patches x d_model] hidden_state = patch_input + self.position_enc return hidden_state class PatchTSMixerNormLayer(nn.Module): """Normalization block Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm_mlp = config.norm_mlp if "batch" in config.norm_mlp.lower(): self.norm = PatchTSMixerBatchNorm(config) else: self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): Input to the normalization layer. 
Returns: `torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))` """ if "batch" in self.norm_mlp.lower(): # reshape the data inputs_reshaped = torch.reshape( inputs, ( inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3], ), ) # inputs_reshaped: [batch_size*num_channels, num_patches, d_model] # inputs_reshaped: [batch_size*num_channels, num_patches, d_model] inputs_reshaped = self.norm(inputs_reshaped) # put back data to the original shape inputs = torch.reshape(inputs_reshaped, inputs.shape) else: inputs = self.norm(inputs) return inputs class PatchTSMixerMLP(nn.Module): def __init__(self, in_features, out_features, config): super().__init__() num_hidden = in_features * config.expansion_factor self.fc1 = nn.Linear(in_features, num_hidden) self.dropout1 = nn.Dropout(config.dropout) self.fc2 = nn.Linear(num_hidden, out_features) self.dropout2 = nn.Dropout(config.dropout) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): Input to the MLP layer. Returns: `torch.Tensor` of the same shape as `inputs` """ inputs = self.dropout1(nn.functional.gelu(self.fc1(inputs))) inputs = self.fc2(inputs) inputs = self.dropout2(inputs) return inputs class PatchTSMixerChannelFeatureMixerBlock(nn.Module): """This module mixes the features in the channel dimension. Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.num_input_channels, out_features=config.num_input_channels, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention( in_size=config.num_input_channels, out_size=config.num_input_channels ) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): input to the MLP layer Returns: `torch.Tensor` of the same shape as `inputs` """ residual = inputs inputs = self.norm(inputs) inputs = inputs.permute(0, 3, 2, 1) if self.gated_attn: inputs = self.gating_block(inputs) inputs = self.mlp(inputs) inputs = inputs.permute(0, 3, 2, 1) out = inputs + residual return out # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTSMixer class PatchTSMixerAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PatchTSMixerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class PatchMixerBlock(nn.Module): """This module mixes the patch dimension. Args: config (`PatchTSMixerConfig`, *required*): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.self_attn = config.self_attn self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.num_patches, out_features=config.num_patches, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches) if config.self_attn: self.self_attn_layer = PatchTSMixerAttention( embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, ) self.norm_attn = PatchTSMixerNormLayer(config) def forward(self, hidden_state): """ Args: hidden_state (`torch.Tensor`): Input tensor. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden_state hidden_state = self.norm(hidden_state) if self.self_attn: batch_size, n_vars, num_patches, d_model = hidden_state.shape hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model) x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False) x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model) # Transpose so that num_patches is the last dimension hidden_state = hidden_state.transpose(2, 3) hidden_state = self.mlp(hidden_state) if self.gated_attn: hidden_state = self.gating_block(hidden_state) # Transpose back hidden_state = hidden_state.transpose(2, 3) if self.self_attn: hidden_state = self.norm_attn(hidden_state + x_attn) out = hidden_state + residual return out class FeatureMixerBlock(nn.Module): """This module mixes the hidden feature dimension. Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.d_model, out_features=config.d_model, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden hidden = self.norm(hidden) hidden = self.mlp(hidden) if self.gated_attn: hidden = self.gating_block(hidden) out = hidden + residual return out class PatchTSMixerLayer(nn.Module): """ The `PatchTSMixer` layer that does all three kinds of mixing. Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.patch_mixer = PatchMixerBlock(config=config) self.feature_mixer = FeatureMixerBlock(config=config) self.mode = config.mode if config.mode == "mix_channel": self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ if self.mode == "mix_channel": hidden = self.channel_feature_mixer(hidden) hidden = self.patch_mixer(hidden) hidden = self.feature_mixer(hidden) # hidden: (batch_size x num_patches x d_model) return hidden class PatchTSMixerBlock(nn.Module): """The main computing framework of the `PatchTSMixer` model. Args: config (`PatchTSMixerConfig`, *required*): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() num_layers = config.num_layers self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(num_layers)]) def forward(self, hidden_state, output_hidden_states: bool = False): """ Args: hidden_state (`torch.Tensor`): The input tensor. output_hidden_states (`bool`, *optional*, defaults to False.): Whether to output the hidden states as well. Returns: `torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to `True`. """ all_hidden_states = [] embedding = hidden_state for mod in self.mixers: embedding = mod(embedding) if output_hidden_states: all_hidden_states.append(embedding) if output_hidden_states: return embedding, all_hidden_states else: return embedding, None class PatchTSMixerForPredictionHead(nn.Module): """Prediction Head for Forecasting Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.prediction_channel_indices = config.prediction_channel_indices if self.prediction_channel_indices is not None: self.prediction_channel_indices.sort() self.dropout_layer = nn.Dropout(config.head_dropout) if distribution_output is None: self.base_forecast_block = nn.Linear((config.num_patches * config.d_model), config.prediction_length) else: self.base_forecast_block = distribution_output.get_parameter_projection( config.num_patches * config.d_model ) self.flatten = nn.Flatten(start_dim=-2) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`. """ hidden_features = self.flatten(hidden_features) # [batch_size x n_vars x num_patch * d_model] hidden_features = self.dropout_layer(hidden_features) # [batch_size x n_vars x num_patch * d_model] forecast = self.base_forecast_block(hidden_features) # [batch_size x n_vars x prediction_length] if isinstance(forecast, tuple): forecast = tuple(z.transpose(-1, -2) for z in forecast) else: forecast = forecast.transpose(-1, -2) # [batch_size x prediction_length x n_vars] if self.prediction_channel_indices is not None: if isinstance(forecast, tuple): forecast = tuple(z[..., self.prediction_channel_indices] for z in forecast) else: forecast = forecast[..., self.prediction_channel_indices] # [batch_size x prediction_length x n_vars] return forecast class PatchTSMixerLinearHead(nn.Module): """Linear head for Classification and Regression. 
Args: config (`PatchTSMixerConfig`, *required*): """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.head_aggregation = config.head_aggregation self.output_range = config.output_range if config.head_aggregation is None: mul_factor = config.num_patches else: mul_factor = 1 self.distribution_output = distribution_output if distribution_output is None: self.projection = nn.Linear( config.d_model * config.num_input_channels * mul_factor, config.num_targets, ) else: self.projection = distribution_output.get_parameter_projection( config.d_model * config.num_input_channels * mul_factor ) if config.head_aggregation is None: self.flatten = nn.Flatten(start_dim=-3) else: self.flatten = nn.Flatten(start_dim=-2) self.dropout = nn.Dropout(config.head_dropout) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x num_targets)`. """ # batch_size x d_model x num_patch or batch_size x n_vars x d_model x num_patch hidden_features = hidden_features.transpose(-1, -2) if self.head_aggregation == "use_last": # batch_size x d_model (flatten) or # batch_size x n_vars x d_model (common_channel) hidden_features = hidden_features[..., -1] elif self.head_aggregation == "max_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.max(dim=-1).values elif self.head_aggregation == "avg_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.mean(dim=-1) if self.flatten: hidden_features = self.flatten(hidden_features) hidden_features = self.dropout(hidden_features) hidden_features = self.projection(hidden_features) # batch_size x num_targets if (self.distribution_output is None) and (self.output_range is not None): hidden_features = ( torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0] ) return hidden_features class PatchTSMixerPreTrainedModel(PreTrainedModel): # Weight initialization config_class = PatchTSMixerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = False def _init_weights(self, module): """Initialize weights""" if isinstance(module, PatchTSMixerPositionalEncoding): # initialize positional encoding if self.config.positional_encoding_type == "random": nn.init.normal_(module.position_enc, mean=0.0, std=0.1) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PatchTSMixerBatchNorm): module.batchnorm.bias.data.zero_() module.batchnorm.weight.data.fill_(1.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() class PatchTSMixerPretrainHead(nn.Module): """Pretraining head. Args: config (`PatchTSMixerConfig`, *required*): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dropout_layer = nn.Dropout(config.head_dropout) self.base_pt_block = nn.Linear(config.d_model, config.patch_length) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`. """ hidden_features = self.dropout_layer(hidden_features) forecast = self.base_pt_block(hidden_features) # [batch_size x n_vars x num_patch x patch_length] return forecast # Copied from transformers.models.patchtst.modeling_patchtst.random_masking def random_masking( inputs: torch.Tensor, mask_ratio: float, unmasked_channel_indices: list = None, channel_consistent_masking: bool = False, mask_value: int = 0, ): """random_masking: Mask the input considering the control variables. Args: inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): The input tensor to mask. mask_ratio (`float`): Masking ratio applied to mask the input data during random pretraining. It is the number between 0 and 1. unmasked_channel_indices (list, *optional*): Indices of channels that will not be masked. channel_consistent_masking (bool, *optional*, defaults to `False`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. mask_value (int, *optional*, defaults to 0): Define the value of masked patches for pretraining. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x n] """ if mask_ratio < 0 or mask_ratio >= 1: raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.") batch_size, num_channels, sequence_length, num_features = inputs.shape device = inputs.device len_keep = int(sequence_length * (1 - mask_ratio)) if channel_consistent_masking: noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time else: # noise in [0, 1], bs x num_channels x L noise = torch.rand(batch_size, num_channels, sequence_length, device=device) # mask: [bs x num_channels x num_patch] mask = torch.ones(batch_size, num_channels, sequence_length, device=device) mask[:, :, :len_keep] = 0 # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] mask = torch.gather(mask, dim=-1, index=ids_restore) mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] # Copied from transformers.models.patchtst.modeling_patchtst.forecast_masking def forecast_masking( inputs: torch.Tensor, num_forecast_mask_patches: Union[list, int], unmasked_channel_indices: list = None, mask_value: int = 0, ): """Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches. If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list. 
Parameters: inputs (`torch.Tensor`): Input of shape `(bs, num_channels, num_patch, patch_length)` num_forecast_mask_patches (`list`): Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5]. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked. mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` """ if isinstance(num_forecast_mask_patches, int): num_forecast_mask_patches = [num_forecast_mask_patches] forecast_mask_ratios = [1 for _ in num_forecast_mask_patches] batch_size, num_channels, sequence_length, num_features = inputs.shape mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) t_list = [] total_length = 0 total_ratio = sum(forecast_mask_ratios) for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios): if patch_length <= 0 or patch_length >= sequence_length: raise ValueError( f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches." ) temp_len = int(batch_size * ratio / total_ratio) t_list.append([patch_length, ratio, temp_len]) total_length += temp_len t_list = sorted(t_list, key=lambda x: x[2]) if total_length < batch_size: t_list[0][2] = t_list[0][2] + (batch_size - total_length) elif total_length > batch_size: t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) batch1 = 0 for patch_len, _, temp_len in t_list: batch2 = batch1 + temp_len mask[batch1:batch2, :, -patch_len:] = 1 batch1 = batch2 perm = torch.randperm(mask.shape[0]) mask = mask[perm] mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTPatchify with PatchTST->PatchTSMixer class PatchTSMixerPatchify(nn.Module): """ A class to patchify the time series sequence into different patches Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.sequence_length = config.context_length self.patch_length = config.patch_length self.patch_stride = config.patch_stride if self.sequence_length <= self.patch_length: raise ValueError( f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" ) # get the number of patches self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) self.sequence_start = self.sequence_length - new_sequence_length def forward(self, past_values: torch.Tensor): """ Parameters: past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): Input for patchification Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ sequence_length = past_values.shape[-2] if sequence_length != self.sequence_length: raise ValueError( f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." 
) # output: [bs x new_sequence_length x num_channels] output = past_values[:, self.sequence_start :, :] # output: [bs x num_patches x num_input_channels x patch_length] output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) # output: [bs x num_input_channels x num_patches x patch_length] output = output.transpose(-2, -3).contiguous() return output # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMasking with PatchTST->PatchTSMixer class PatchTSMixerMasking(nn.Module): """ Class to perform random or forecast masking. Parameters: config (`PatchTSMixerConfig`): model config Returns: x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.random_mask_ratio = config.random_mask_ratio self.channel_consistent_masking = config.channel_consistent_masking self.mask_type = config.mask_type self.num_forecast_mask_patches = config.num_forecast_mask_patches self.unmasked_channel_indices = config.unmasked_channel_indices self.mask_value = config.mask_value if self.unmasked_channel_indices is not None: self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input Return: masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ if self.mask_type == "random": masked_input, mask = random_masking( inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value, ) elif self.mask_type == "forecast": masked_input, mask = forecast_masking( inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value, ) else: raise ValueError(f"Invalid mask type {self.mask_type}.") # mask: [bs x num_input_channels x num_patch] mask = mask.bool() return masked_input, mask # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTStdScaler with PatchTST->PatchTSMixer class PatchTSMixerStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. 
Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMeanScaler with PatchTST->PatchTSMixer class PatchTSMixerMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTNOPScaler with PatchTST->PatchTSMixer class PatchTSMixerNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @dataclass class PatchTSMixerEncoderOutput(ModelOutput): """ Base class for `PatchTSMixerEncoderOutput`, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel): """ Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings. Args: config (`PatchTSMixerConfig`, *required*): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.use_return_dict = config.use_return_dict self.patcher = nn.Linear(config.patch_length, config.d_model) if config.use_positional_encoding: self.positional_encoder = PatchTSMixerPositionalEncoding(config=config) else: self.positional_encoder = None self.mlp_mixer_encoder = PatchTSMixerBlock(config=config) # Initialize weights and apply final processing if config.post_init: self.post_init() @replace_return_docstrings(output_type=PatchTSMixerEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSMixerEncoderOutput]: r""" Args: past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: `torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)` """ return_dict = return_dict if return_dict is not None else self.use_return_dict # flatten [bs x num_patch x d_model]. 
common_channel/mix_channel: [bs x n_vars x num_patch x d_model] patches = self.patcher(past_values) # add positional encoder if self.positional_encoder is not None: patches = self.positional_encoder(patches) last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states) if not return_dict: return tuple( v for v in [ last_hidden_state, hidden_states, ] ) return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states) @dataclass class PatchTSMixerModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): Patched input data to the model. mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`,*optional*): Bool Tensor indicating True in masked patches and False otherwise. loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the mean of the context window per channel. Used for revin denorm outside the model, if revin enabled. scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the std dev of the context window per channel. Used for revin denorm outside the model, if revin enabled. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None patch_input: torch.FloatTensor = None mask: Optional[torch.FloatTensor] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None @add_start_docstrings( "The PatchTSMixer Model for time-series forecasting.", PATCHTSMIXER_START_DOCSTRING, ) class PatchTSMixerModel(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig, mask_input: bool = False): super().__init__(config) self.use_return_dict = config.use_return_dict self.encoder = PatchTSMixerEncoder(config) self.patching = PatchTSMixerPatchify(config) if mask_input is True: self.masking = PatchTSMixerMasking(config) else: self.masking = None if config.scaling == "mean": self.scaler = PatchTSMixerMeanScaler(config) elif config.scaling == "std" or config.scaling is True: self.scaler = PatchTSMixerStdScaler(config) else: self.scaler = PatchTSMixerNOPScaler(config) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> PatchTSMixerModelOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict mask = None if observed_mask is None: observed_mask = torch.ones_like(past_values) scaled_past_values, loc, scale = self.scaler(past_values, observed_mask) patched_x = self.patching(scaled_past_values) # [batch_size x num_input_channels x num_patch x patch_length enc_input = patched_x if self.masking is not None: enc_input, mask = self.masking(patched_x) # enc_input: [batch_size x num_input_channels x num_patch x patch_length] # mask: [batch_size x num_input_channels x num_patch] encoder_output = self.encoder( enc_input, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if isinstance(encoder_output, tuple): encoder_output = PatchTSMixerEncoderOutput(*encoder_output) if not return_dict: return tuple( v for v in [ encoder_output.last_hidden_state, encoder_output.hidden_states, patched_x, mask, loc, scale, ] ) return PatchTSMixerModelOutput( last_hidden_state=encoder_output.last_hidden_state, hidden_states=encoder_output.hidden_states, patch_input=patched_x, mask=mask, loc=loc, scale=scale, ) @dataclass class PatchTSMixerForPreTrainingOutput(ModelOutput): """ Output type of [`PatchTSMixerForPreTrainingOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`): Prediction output from the pretrain head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for mask pretraining. Args: config (`PatchTSMixerConfig`, *required*): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config, mask_input=True) self.head = PatchTSMixerPretrainHead(config=config) self.masked_loss = config.masked_loss self.use_return_dict = config.use_return_dict # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForPreTrainingOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. 
Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict if self.masked_loss is True: loss = torch.nn.MSELoss(reduction="none") else: loss = torch.nn.MSELoss(reduction="mean") # past_values: tensor [batch_size x context_length x num_input_channels] model_output = self.model( past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # x.last_hidden_state: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) x_hat = self.head(model_output.last_hidden_state) # tensor [batch_size x nvars x num_patch x patch_length] if return_loss is True: loss_val = loss(x_hat, model_output.patch_input) else: loss_val = None # calculate masked_loss if self.masked_loss is True and loss_val is not None: loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) if not return_dict: return tuple( v for v in [ loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForPreTrainingOutput( loss=loss_val, prediction_outputs=x_hat, # tensor [batch_size x nvars x num_patch x patch_length] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) @dataclass class PatchTSMixerForPredictionOutput(ModelOutput): """ Output type of [`PatchTSMixerForPredictionOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`): Prediction output from the forecast head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. loc (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input mean scale (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input std dev """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None loc: torch.FloatTensor = None scale: torch.FloatTensor = None @dataclass class SamplePatchTSMixerPredictionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, number_channels)`): Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None @dataclass class SamplePatchTSMixerRegressionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)` Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. 
""" return -input.log_prob(target) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. """ if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for forecasting application. Args: config (`PatchTSMixerConfig`, *required*): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.loss = config.loss self.use_return_dict = config.use_return_dict self.prediction_channel_indices = config.prediction_channel_indices self.num_parallel_samples = config.num_parallel_samples if config.loss == "mse": self.distribution_output = None else: dim = config.prediction_length distribution_output_map = { "student_t": StudentTOutput, "normal": NormalOutput, "negative_binomial": NegativeBinomialOutput, } output_class = distribution_output_map.get(config.distribution_output, None) if output_class is not None: self.distribution_output = output_class(dim=dim) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.model = PatchTSMixerModel(config) self.head = PatchTSMixerForPredictionHead( config=config, distribution_output=self.distribution_output, ) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForPredictionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForPredictionOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,: `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. 
For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. Returns: """ if self.loss == "mse": loss = nn.MSELoss(reduction="mean") elif self.loss == "nll": loss = nll else: raise ValueError("Invalid loss function: Allowed values: mse and nll") return_dict = return_dict if return_dict is not None else self.use_return_dict # past_values: tensor [batch_size x context_length x num_input_channels] model_output = self.model( past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # model_output: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) # tensor [batch_size x prediction_length x num_input_channels] y_hat = self.head(model_output.last_hidden_state) loss_val = None if self.prediction_channel_indices is not None: if self.distribution_output: distribution = self.distribution_output.distribution( y_hat, loc=model_output.loc[..., self.prediction_channel_indices], scale=model_output.scale[..., self.prediction_channel_indices], ) if future_values is not None and return_loss is True: loss_val = loss( distribution, future_values[..., self.prediction_channel_indices], ) # take average of the loss loss_val = weighted_average(loss_val) else: y_hat = ( y_hat * model_output.scale[..., self.prediction_channel_indices] + model_output.loc[..., self.prediction_channel_indices] ) if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values[..., self.prediction_channel_indices]) else: if self.distribution_output: distribution = self.distribution_output.distribution( y_hat, loc=model_output.loc, scale=model_output.scale ) if future_values is not None and return_loss is True: loss_val = loss(distribution, future_values) loss_val = weighted_average(loss_val) else: y_hat = y_hat * model_output.scale + model_output.loc if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values) if self.prediction_channel_indices is not None: loc = model_output.loc[..., self.prediction_channel_indices] scale = model_output.scale[..., self.prediction_channel_indices] else: loc = model_output.loc scale = model_output.scale if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, loc, scale, ] ) return PatchTSMixerForPredictionOutput( loss=loss_val, prediction_outputs=y_hat, # tensor [batch_size x prediction_length x num_input_channels] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, loc=loc, scale=scale, ) def generate( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, ) -> SamplePatchTSMixerPredictionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. 
observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length, num_input_channels)`. """ # get number of samples num_parallel_samples = self.num_parallel_samples # get model output outputs = self( past_values=past_values, future_values=None, observed_mask=observed_mask, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution( outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale ) # get samples: list of [batch_size x prediction_length x num_channels] samples = [distribution.sample() for _ in range(num_parallel_samples)] # stack tensors samples = torch.stack(samples, dim=1) # [batch_size x num_samples x prediction_length x num_channels] return SamplePatchTSMixerPredictionOutput(sequences=samples) @dataclass class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput): """ Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Prediction output from the classfication head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for classification application. Args: config (`PatchTSMixerConfig`, *required*): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.head = PatchTSMixerLinearHead( config=config, ) self.use_return_dict = config.use_return_dict if config.scaling in ["std", "mean", True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings( output_type=PatchTSMixerForTimeSeriesClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, past_values: torch.Tensor, target_values: torch.Tensor = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForTimeSeriesClassificationOutput: r""" target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. 
The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. Returns: """ loss = torch.nn.CrossEntropyLoss() return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model( past_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # x: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale( model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale, ) # x: [batch_size x nvars x num_patch x d_model] y_hat = self.head(model_output.last_hidden_state) # tensor [batch_size x n_labels] if target_values is not None and return_loss is True: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForTimeSeriesClassificationOutput( loss=loss_val, prediction_outputs=y_hat, # tensor [batch_size x n_labels] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) @dataclass class PatchTSMixerForRegressionOutput(ModelOutput): """ Output type of [`PatchTSMixerForRegressionOutput`]. Args: regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`): Prediction output from the regression head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. 
""" loss: Optional[torch.FloatTensor] = None regression_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class InjectScalerStatistics4D(nn.Module): def __init__(self, d_model: int, num_patches: int, expansion: int = 2): super().__init__() self.inverse_trans_expansion = nn.Linear(d_model + 2, expansion * d_model) self.inverse_trans_compression = nn.Linear(expansion * d_model, d_model) self.map_scale_expansion = nn.Linear(2, 2 * expansion) self.map_scale_compression = nn.Linear(2 * expansion, 2) self.num_patches = num_patches def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`) loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`) scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`) Returns: `torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)` """ mean = loc.transpose(-1, -2) # [batch_size x n_channels x 1 ] mean = mean.unsqueeze(-2) # [batch_size x n_channels x 1 x 1] mean = mean.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1] stdev = scale.transpose(-1, -2) # [batch_size x n_channels x 1 ] stdev = stdev.unsqueeze(-2) # [batch_size x n_channels x 1 x 1] stdev = stdev.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1] concat_stats = torch.cat([mean, stdev], dim=-1) # [batch_size x n_channels x num_patch x 2] concat_stats = self.map_scale_expansion(concat_stats) # [batch_size x n_channels x num_patch x (2*expansion)] concat_stats = self.map_scale_compression(concat_stats) # [batch_size x n_channels x num_patch x 2] inputs = torch.cat([inputs, concat_stats], dim=-1) # [batch_size x channels x num_patch x d_model+2] inputs = self.inverse_trans_expansion(inputs) # [batch_size x channels x num_patch x (expansion*d_model)] inputs = self.inverse_trans_compression(inputs) # [batch_size x channels x num_patch x d_model] return inputs class PatchTSMixerForRegression(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for regression application. Args: config (`PatchTSMixerConfig`, *required*): Configuration. Returns: `None`. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.loss = config.loss self.distribution_output = config.distribution_output self.use_return_dict = config.use_return_dict self.num_parallel_samples = config.num_parallel_samples if config.loss == "mse": self.distribution_output = None else: distribution_output_map = { "student_t": StudentTOutput, "normal": NormalOutput, "negative_binomial": NegativeBinomialOutput, } output_class = distribution_output_map.get(config.distribution_output) if output_class is not None: self.distribution_output = output_class(dim=config.num_targets) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") if config.scaling in ["std", "mean", True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None self.head = PatchTSMixerLinearHead( config=config, distribution_output=self.distribution_output, ) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForRegressionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, target_values: torch.Tensor = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForRegressionOutput: r""" target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. 
Returns: """ if self.loss == "mse": loss = nn.MSELoss(reduction="mean") elif self.loss == "nll": loss = nll else: raise ValueError("Invalid loss function: Allowed values: mse and nll") return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model( past_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # model_output: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale( model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale, ) # x: [batch_size x nvars x num_patch x d_model] y_hat = self.head(model_output.last_hidden_state) # [batch_size x num_targets] if target_values is not None and return_loss is True: if self.distribution_output: if self.distribution_output == "negative_binomial" and torch.any(target_values < 0): raise Exception("target_values cannot be negative for negative_binomial distribution.") distribution = self.distribution_output.distribution(y_hat) # y_hat should be a 2-tuple, each with dimension [bs, num_targets] y_hat = tuple([item.view(-1, self.config.num_targets) for item in y_hat]) loss_val = loss(distribution, target_values) # take average of the loss loss_val = weighted_average(loss_val) else: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForRegressionOutput( loss=loss_val, regression_outputs=y_hat, # tensor [batch_size x num_targets] last_hidden_state=model_output.last_hidden_state, # [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) def generate( self, past_values: torch.Tensor, ) -> SamplePatchTSMixerRegressionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the target values. Return: [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ # get number of samples num_parallel_samples = self.num_parallel_samples # get model output outputs = self( past_values=past_values, target_values=None, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution(outputs.regression_outputs) # get samples samples = [ distribution.sample() for _ in range(num_parallel_samples) ] # samples: list of [batch_size x num_targets] # stack tensors # [batch_size x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples)
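

if __name__ == "__main__":
    # --- Illustrative sketch, not part of the original module -----------------
    # The forecasting path above supports probabilistic prediction: with
    # `loss="nll"` a distribution head is attached and `generate()` draws
    # `num_parallel_samples` trajectories from the predicted distribution.
    # The config field names below are assumptions based on the usual
    # `PatchTSMixerConfig` signature, and the model is randomly initialised,
    # so this only demonstrates shapes and control flow, not real forecasts.
    config = PatchTSMixerConfig(
        context_length=32,        # history window fed to the model
        prediction_length=8,      # forecast horizon
        num_input_channels=3,     # multivariate series with 3 channels
        patch_length=8,
        patch_stride=8,
        loss="nll",               # enables the distribution head used by generate()
        distribution_output="student_t",
        num_parallel_samples=20,
    )
    model = PatchTSMixerForPrediction(config)

    past_values = torch.randn(4, config.context_length, config.num_input_channels)
    samples = model.generate(past_values=past_values)
    # sequences: (batch_size, num_parallel_samples, prediction_length, num_input_channels)
    print(samples.sequences.shape)  # expected: torch.Size([4, 20, 8, 3])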
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mt5/modeling_flax_mt5.py
# coding=utf-8 # Copyright 2021 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax mT5 model.""" import jax.numpy as jnp from ...utils import logging from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model from .configuration_mt5 import MT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxMT5Model(FlaxT5Model): r""" This class overrides [`FlaxT5Model`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import FlaxMT5Model, AutoTokenizer >>> model = FlaxMT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class FlaxMT5EncoderModel(FlaxT5EncoderModel): r""" This class overrides [`FlaxT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import FlaxT5EncoderModel, AutoTokenizer >>> model = FlaxT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(input_ids=inputs["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration): r""" This class overrides [`FlaxT5ForConditionalGeneration`]. Please check the superclass for the appropriate documentation alongside usage examples. 
Examples: ```python >>> from transformers import FlaxMT5ForConditionalGeneration, AutoTokenizer >>> model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="np") >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids) >>> logits = outputs.logits ```""" model_type = "mt5" config_class = MT5Config
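

if __name__ == "__main__":
    # --- Illustrative sketch, not part of the original module -----------------
    # `shift_tokens_right` defined above builds decoder inputs from labels:
    # tokens are shifted one position to the right, `decoder_start_token_id`
    # is written at position 0, and any -100 (the conventional "ignored label"
    # value) is replaced by `pad_token_id`. The token ids below are made up
    # purely to show the transformation; for mT5 both the pad token and the
    # decoder start token id are 0.
    labels = jnp.array([[259, 1433, 1, -100, -100]])
    decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
    print(decoder_input_ids)  # expected: [[0 259 1433 1 0]]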
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mt5/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..t5.tokenization_t5 import T5Tokenizer else: from ...utils.dummy_sentencepiece_objects import T5Tokenizer MT5Tokenizer = T5Tokenizer if is_tokenizers_available(): from ..t5.tokenization_t5_fast import T5TokenizerFast else: from ...utils.dummy_tokenizers_objects import T5TokenizerFast MT5TokenizerFast = T5TokenizerFast _import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mt5"] = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5ForSequenceClassification", "MT5ForTokenClassification", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mt5 import MT5Config, MT5OnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mt5 import ( MT5EncoderModel, MT5ForConditionalGeneration, MT5ForQuestionAnswering, MT5ForSequenceClassification, MT5ForTokenClassification, MT5Model, MT5PreTrainedModel, MT5Stack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast}, module_spec=__spec__, )
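
# --- Hedged usage note, not part of the original module ----------------------
# The `_LazyModule` wiring above means the framework-specific classes are only
# imported when first accessed, so `import transformers.models.mt5` stays cheap
# even when torch/TF/Flax are missing, while `MT5Tokenizer`/`MT5TokenizerFast`
# are exposed as plain aliases of the T5 tokenizers via `extra_objects`.
# A minimal sketch of what downstream code can rely on:
#
#     from transformers.models.mt5 import MT5Config      # resolved lazily
#     from transformers import MT5TokenizerFast          # alias of T5TokenizerFast
#     config = MT5Config()                                # default mT5 hyper-parameters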
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mt5/modeling_mt5.py
# coding=utf-8 # Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch mT5 model.""" import copy import math import os import warnings from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_mt5 import MT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MT5Config" _CHECKPOINT_FOR_DOC = "mt5-small" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (`Dict[int, list]`, optional, defaults to None): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the mt5 models have the following number of attention modules: - mt5-small: 6 - mt5-base: 12 - mt5-large: 24 - mt5-xl: 24 - mt5-xxl: 24 Example: ```python # Here is an example of a device map on a machine with 4 GPUs using mt5-xl, which has a total of 24 attention modules: model = MT5ForConditionalGeneration.from_pretrained("mt5-xl") device_map = { 0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23], } model.parallelize(device_map) ``` """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to cpu from a model parallel state. 
Example: ```python # On a 4 GPU machine with mt5-xl: model = MT5ForConditionalGeneration.from_pretrained("Mt5-xl") device_map = { 0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23], } model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() ``` """ # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5 class MT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the MT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # MT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->MT5 class MT5DenseActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->MT5 class MT5DenseGatedActDense(nn.Module): def __init__(self, config: MT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->MT5 class MT5LayerFF(nn.Module): def __init__(self, config: MT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = MT5DenseGatedActDense(config) else: self.DenseReluDense = MT5DenseActDense(config) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->MT5 class MT5Attention(nn.Module): def __init__(self, config: MT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + 
(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->MT5 class MT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = MT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->MT5 class MT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False) self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5Block with T5->MT5 class MT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(MT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(MT5LayerCrossAttention(config)) self.layer.append(MT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (key / value) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) def load_tf_weights_in_mt5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) tf_weights[name] = array for txt_name in names: name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") tf_weights.pop(txt_name, None) continue if "_slot_" in name[-1]: logger.info(f"Skipping {'/'.join(name)}") tf_weights.pop(txt_name, None) continue pointer = model array = tf_weights[txt_name] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") elif scope_names[0] == "self_attention": pointer = getattr(pointer, "layer") pointer = pointer[0] elif scope_names[0] == "enc_dec_attention": pointer = getattr(pointer, "layer") pointer = pointer[1] elif scope_names[0] == "dense_relu_dense": pointer = getattr(pointer, "layer") pointer = pointer[2] elif scope_names[0] == "rms_norm": if hasattr(pointer, "layer_norm"): pointer = getattr(pointer, "layer_norm") elif hasattr(pointer, "final_layer_norm"): pointer = getattr(pointer, "final_layer_norm") elif scope_names[0] == "scale": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") elif scope_names[0] == "decoder" and name[1] == "logits": continue elif scope_names[0] == "logits": pointer = getattr(pointer, "lm_head") elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit(): pointer = getattr(pointer, f"wi_{scope_names[1]}") continue else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if scope_names[0] not in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") if scope_names[0] != "embedding": logger.info(f"Transposing numpy weight of shape {array.shape} for {name}") array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array.astype(np.float32)) tf_weights.pop(txt_name, None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") return model # Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->MT5 class MT5ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: MT5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = 
self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel with T5->MT5, t5->mt5 class MT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MT5Config load_tf_weights = load_tf_weights_in_mt5 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["MT5Block"] _keep_in_fp32_modules = ["wo"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, MT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, (MT5Model, MT5ForConditionalGeneration, MT5EncoderModel, MT5ForQuestionAnswering), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, MT5ForTokenClassification): if hasattr(module, "classifier"): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, MT5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.dense, "bias") and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, MT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * 
((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, MT5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. " "See MT5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.t5.modeling_t5.T5Stack with T5->MT5 class MT5Stack(MT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [MT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`MT5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") # initialize past_key_values with `None` if 
past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long ) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if cross_attn_layer_head_mask is not None: cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) MT5_START_DOCSTRING = r""" The MT5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MT5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5 Training](./mt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ MT5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
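As an illustration of the `inputs_embeds` argument described above, a minimal sketch (reusing the `google/mt5-small` checkpoint from the other examples in this file) that passes precomputed embeddings instead of token ids:

```python
>>> from transformers import AutoTokenizer, MT5EncoderModel

>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> model = MT5EncoderModel.from_pretrained("google/mt5-small")

>>> input_ids = tokenizer("UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt").input_ids
>>> # Same lookup the model would perform internally on `input_ids`.
>>> inputs_embeds = model.get_input_embeddings()(input_ids)
>>> outputs = model(inputs_embeds=inputs_embeds)
>>> last_hidden_state = outputs.last_hidden_state
```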
""" # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare MT5 Model transformer outputting raw hidden-states without any specific head on top.", MT5_START_DOCSTRING, ) class MT5Model(MT5PreTrainedModel): r""" Examples: ```python >>> from transformers import MT5Model, AutoTokenizer >>> model = MT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="pt") >>> labels = tokenizer(text_target=summary, return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5Model.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5Model.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0':" " 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5Model.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder def get_decoder(self): return self.decoder # Copied from transformers.models.t5.modeling_t5.T5Model._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5Model.forward with T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, MT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google-mt5/mt5-small") >>> model = MT5Model.from_pretrained("google-mt5/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for MT5Model. >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using labels arg. >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""MT5 Model with a `language modeling` head on top.""", MT5_START_DOCSTRING) class MT5ForConditionalGeneration(MT5PreTrainedModel): r""" Examples: ```python >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in 
Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = "mt5" config_class = MT5Config _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you" " should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also" " provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance" " {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings def get_output_embeddings(self): return self.lm_head # 
Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.forward with T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-mt5/mt5-small") >>> model = MT5ForConditionalGeneration.from_pretrained("google-mt5/mt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. 
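>>> # An illustrative sketch of the `-100` convention documented above: label positions set to
>>> # -100 (for example padding) are ignored when the loss is computed.
>>> batch = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt", padding="max_length", max_length=16)
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt", padding="max_length", max_length=16).input_ids
>>> labels[labels == tokenizer.pad_token_id] = -100
>>> loss = model(input_ids=batch.input_ids, attention_mask=batch.attention_mask, labels=labels).loss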
```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare MT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", MT5_START_DOCSTRING, ) class MT5EncoderModel(MT5PreTrainedModel): r""" Examples: ```python >>> from transformers import MT5EncoderModel, AutoTokenizer >>> model = MT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." 
>>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config _tied_weights_keys = ["encoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.parallelize def parallelize(self, device_map=None): warnings.warn( "`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.deparallelize def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(MT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with T5->MT5, t5->mt5 def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, MT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google-mt5/mt5-small") >>> model = MT5EncoderModel.from_pretrained("google-mt5/mt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs @add_start_docstrings( """ MT5 model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
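A minimal usage sketch (the checkpoint name and `num_labels` below are illustrative; the classification head of the base checkpoint is newly initialized and needs fine-tuning before the predictions are meaningful):

```python
>>> import torch
>>> from transformers import AutoTokenizer, MT5ForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> model = MT5ForSequenceClassification.from_pretrained("google/mt5-small", num_labels=2)

>>> inputs = tokenizer("UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> predicted_class_id = int(logits.argmax(dim=-1))
```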
""", MT5_START_DOCSTRING, ) class MT5ForSequenceClassification(MT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.transformer = MT5Model(config) self.classification_head = MT5ClassificationHead(config) # Initialize weights and apply final processing self.post_init() self.model_parallel = False @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) # Copied from models.bart.modeling_bart.BartModel.forward different to other models, T5 automatically creates # decoder_input_ids from input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
) decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ MT5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MT5_START_DOCSTRING, ) class MT5ForTokenClassification(MT5PreTrainedModel): _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.num_labels = config.num_labels self.transformer = MT5EncoderModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.forward with T5->MT5 def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits, outputs[2:-1]) return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MT5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MT5_START_DOCSTRING, ) class MT5ForQuestionAnswering(MT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.__init__ with T5->MT5 def __init__(self, config: MT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = MT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = MT5Stack(decoder_config, self.shared) self.num_labels = config.num_labels self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() self.model_parallel = False # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_decoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.forward def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). 
Position outside of the sequence are not taken into account for computing the loss. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if start_positions is not None and end_positions is not None: use_cache = False # Copied from models.bart.modeling_bart.BartModel.forward # different to other models, T5 automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = self._shift_right(input_ids) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs return ((total_loss,) + output) if total_loss is not None 
else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )
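# A minimal usage sketch for `MT5ForQuestionAnswering`; the "google/mt5-small"
# checkpoint and the question/context pair are illustrative assumptions, and the
# span head is randomly initialized unless a checkpoint fine-tuned for extractive
# QA is loaded instead.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
    model = MT5ForQuestionAnswering.from_pretrained("google/mt5-small")

    question = "Who negotiated in Syria?"
    context = "The UN officer said that negotiations in Syria must continue."
    inputs = tokenizer(question, context, return_tensors="pt")

    outputs = model(**inputs)
    # the predicted span is read off the start/end logits over the decoder sequence
    print(int(outputs.start_logits.argmax(-1)), int(outputs.end_logits.argmax(-1)))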
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mt5/modeling_tf_mt5.py
# coding=utf-8 # Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tensorflow mT5 model.""" from ...utils import logging from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model from .configuration_mt5 import MT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" class TFMT5Model(TFT5Model): r""" This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5Model, AutoTokenizer >>> model = TFMT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="tf") >>> labels = tokenizer(text_target=summary, return_tensors="tf") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration): r""" This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5ForConditionalGeneration, AutoTokenizer >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="tf") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = "mt5" config_class = MT5Config class TFMT5EncoderModel(TFT5EncoderModel): r""" This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5EncoderModel, AutoTokenizer >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="tf").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mt5/configuration_mt5.py
# coding=utf-8 # Copyright 2020, The T5 Authors and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ mT5 model configuration""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeq2SeqConfigWithPast from ...utils import logging logger = logging.get_logger(__name__) class MT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the mT5 [google/mt5-small](https://huggingface.co/google/mt5-small) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 250112): Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`]. d_model (`int`, *optional*, defaults to 512): Size of the encoder layers and the pooler layer. d_kv (`int`, *optional*, defaults to 64): Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`. But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`. d_ff (`int`, *optional*, defaults to 1024): Size of the intermediate feed forward layer in each `T5Block`. num_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the Transformer encoder. num_decoder_layers (`int`, *optional*): Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. num_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. relative_attention_num_buckets (`int`, *optional*, defaults to 32): The number of buckets to use for each attention layer. relative_attention_max_distance (`int`, *optional*, defaults to 128): The maximum distance of the longer sequences for the bucket separation. dropout_rate (`float`, *optional*, defaults to 0.1): The ratio for all dropout layers. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. layer_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). """ model_type = "mt5" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, classifier_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache act_info = self.feed_forward_proj.split("-") self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == "gated" if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": self.dense_act_fn = "gelu_new" super().__init__( is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, ) class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence" common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def default_onnx_opset(self) -> int: return 13 @property def atol_for_validation(self) -> float: return 5e-4
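# A small sketch of how `MT5Config` resolves `feed_forward_proj` into the
# `is_gated_act`/`dense_act_fn` attributes consumed by the modeling code; the
# constructor arguments below are arbitrary illustration values.
if __name__ == "__main__":
    config = MT5Config()  # default feed_forward_proj="gated-gelu"
    print(config.is_gated_act)  # True
    print(config.dense_act_fn)  # "gelu_new" (backwards-compatible remapping)

    relu_config = MT5Config(feed_forward_proj="relu", num_layers=4, num_heads=4)
    print(relu_config.is_gated_act)  # False
    print(relu_config.dense_act_fn)  # "relu"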
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/perceiver.py
# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License. # # MIT License # # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore. References: - DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model - Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch """ from typing import Optional, Tuple import torch import torch.nn as nn from .configuration_idefics import IdeficsConfig class IdeficsPerceiverResampler(nn.Module): def __init__( self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int ) -> None: """ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then returns a Tensor of shape [bsz, n_latents, embed_dim]. :param embed_dim: Dimensionality of embeddings being fed to the Perceiver Resampler (also dimensionality of latent embeddings *returned* by the Perceiver Resampler. Could be e.g., VIT embed_dim, ResNet pool dim, and so on. Args: config (`IdeficsConfig`): config object embed_dim (`int`): The size of each embedding vector depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3). n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention). head_dim (`int`): Dimensionality of each head projection in the Transformer block. n_latents (`int`): Number of latent embeddings to resample ("compress") the input sequence to (usually < 128). 
""" super().__init__() self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver # Create Latents for Perceiver self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True) self.intermediate_dim = ( self.embed_dim * 4 if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim * 4 ) # Create Transformer Blocks self.blocks = nn.ModuleList( [ nn.ModuleList( [ IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms), IdeficsMLP(self.intermediate_dim, config), ] ) for _ in range(depth) ] ) self.layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, context: torch.Tensor) -> torch.Tensor: """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings""" # einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0]) latents = self.latents.repeat(context.shape[0], 1, 1) # Feed through Perceiver Attention blocks... for attn, ff in self.blocks: latents = attn(context, latents) + latents latents = ff(latents) + latents return self.layer_norm(latents) class IdeficsPerceiverAttention(nn.Module): def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None: """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`""" super().__init__() self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim self.qk_layer_norms = qk_layer_norms # Normalization & Scaling self.context_layer_norm = nn.LayerNorm(self.embed_dim) self.latents_layer_norm = nn.LayerNorm(self.embed_dim) if self.qk_layer_norms: self.q_layer_norm = nn.LayerNorm(self.head_dim) self.k_layer_norm = nn.LayerNorm(self.head_dim) self.qk_scale = self.head_dim**-0.5 # Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers). self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False) def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: """ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! Args: context (`torch.Tensor`): Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample. latents (`torch.Tensor`): Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to. Returns: `torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross from context. """ context = self.context_layer_norm(context) latents = self.latents_layer_norm(latents) batch_size, seq_length, embed_dim = context.shape[:3] # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn! 
# Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents` q = self.q_proj(latents) k = self.k_proj(torch.cat([context, latents], dim=-2)) v = self.v_proj(torch.cat([context, latents], dim=-2)) # Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call) # =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)] # einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads) q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)] if self.qk_layer_norms: q = self.q_layer_norm(q) k = self.k_layer_norm(k) scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k) stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach()) attn = stabilized_scores.softmax(dim=-1) # Attend & project back to output... resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v) # einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads) return self.output_proj(resampled.transpose(1, 2).flatten(-2)) class IdeficsMLP(nn.Module): def __init__(self, intermediate_size, config: IdeficsConfig): """Simple MLP block with intermediate_size and embedding size""" super().__init__() self.embed_dim = config.vision_config.embed_dim self.ln = nn.LayerNorm(self.embed_dim) self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False) self.act = nn.ReLU() self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.ln(hidden_states) hidden_states = self.fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states
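# A shape sanity-check sketch for `IdeficsPerceiverAttention`: the latents query the
# concatenation of context and latents, and the output keeps the latent sequence
# length. All sizes below are arbitrary illustration values.
if __name__ == "__main__":
    attention = IdeficsPerceiverAttention(embed_dim=64, n_heads=4, head_dim=16, qk_layer_norms=True)
    context = torch.randn(2, 50, 64)  # [bsz, seq, embed_dim]
    latents = torch.randn(2, 8, 64)  # [bsz, n_latents, embed_dim]
    print(attention(context, latents).shape)  # torch.Size([2, 8, 64])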
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {"configuration_idefics": ["IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP", "IdeficsConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_idefics"] = ["IdeficsImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_idefics"] = [ "IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST", "IdeficsForVisionText2Text", "IdeficsModel", "IdeficsPreTrainedModel", ] _import_structure["processing_idefics"] = ["IdeficsProcessor"] if TYPE_CHECKING: from .configuration_idefics import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP, IdeficsConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_idefics import IdeficsImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_idefics import ( IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST, IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel, ) from .processing_idefics import IdeficsProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/image_processing_idefics.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Idefics.""" from typing import Callable, Dict, List, Optional, Union from PIL import Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073] IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711] def convert_to_rgb(image): # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background # for transparent images. The call to `alpha_composite` handles this case if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite class IdeficsImageProcessor(BaseImageProcessor): r""" Constructs an Idefics image processor. Args: image_size (`int`, *optional*, defaults to 224): Resize to image size image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. image_num_channels (`int`, *optional*, defaults to 3): Number of image channels. """ model_input_names = ["pixel_values"] def __init__( self, image_size: int = 224, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, image_num_channels: Optional[int] = 3, **kwargs, ) -> None: super().__init__(**kwargs) self.image_size = image_size self.image_num_channels = image_num_channels self.image_mean = image_mean self.image_std = image_std def preprocess( self, images: ImageInput, image_num_channels: Optional[int] = 3, image_size: Optional[Dict[str, int]] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, transform: Callable = None, **kwargs, ) -> TensorType.PYTORCH: """ Preprocess a batch of images. Args: images (`ImageInput`): A list of images to preprocess.
image_size (`int`, *optional*, defaults to `self.image_size`): Resize to image size image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`): Number of image channels. image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. transform (`Callable`, *optional*, defaults to `None`): A custom transform function that accepts a single image can be passed for training. For example, `torchvision.Compose` can be used to compose multiple transforms. If `None`, inference mode is assumed and a preset of inference-specific transforms will be applied to the images. Returns: a PyTorch tensor of the processed images """ image_size = image_size if image_size is not None else self.image_size image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = (image_size, image_size) if isinstance(images, list) and len(images) == 0: return [] images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # For training a user needs to pass their own set of transforms as a Callable. # For reference this is what was used in the original IDEFICS training: # transform = transforms.Compose([ # convert_to_rgb, # transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC), # transforms.ToTensor(), # transforms.Normalize(mean=image_mean, std=image_std), # ]) if transform is not None: if not is_torch_available(): raise ImportError("To pass in `transform` torch must be installed") import torch images = [transform(x) for x in images] return torch.stack(images) # for inference we do the exact transforms that were used to train IDEFICS images = [convert_to_rgb(x) for x in images] # further transforms expect numpy arrays images = [to_numpy_array(x) for x in images] images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images] images = [self.rescale(image=image, scale=1 / 255) for image in images] images = [self.normalize(x, mean=image_mean, std=image_std) for x in images] images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images] # TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available images = BatchFeature(data={"pixel_values": images}, tensor_type=TensorType.PYTORCH)["pixel_values"] return images
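# A minimal sketch of the inference path of `IdeficsImageProcessor.preprocess` (no
# custom `transform`): convert to RGB, resize, rescale, normalize and stack into a
# float tensor. The mean/std are passed explicitly here purely for illustration.
if __name__ == "__main__":
    processor = IdeficsImageProcessor(
        image_size=224, image_mean=IDEFICS_STANDARD_MEAN, image_std=IDEFICS_STANDARD_STD
    )
    dummy_image = Image.new("RGB", (640, 480), color=(127, 127, 127))
    pixel_values = processor.preprocess([dummy_image])
    print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])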
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/processing_idefics.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for IDEFICS. """ from typing import Callable, List, Optional, Union from urllib.parse import urlparse from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy from ...utils import TensorType, is_torch_available if is_torch_available(): import torch IMAGE_TOKEN = "<image>" # copied from m4.training.packing def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1): # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]] # If any of images index are more than num_classes, set them to -1. # Words after the max number of images allowed have been seen don't attend on anything if num_classes != -1: incremental_mask[incremental_mask >= num_classes] = -1 negatives = incremental_mask == -1 incremental_mask[negatives] = 0 attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes) attn_mask[negatives, :] = 0 return attn_mask # copied from m4.training.packing def image_attention_mask_for_packed_input_ids(input_ids, tokenizer): image_attention_mask = torch.full_like(input_ids, fill_value=-1) next_image_attention_mask = torch.full_like(input_ids, fill_value=-1) image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx, token_id in enumerate(input_ids[batch_idx]): if token_id == image_token_id: count += 1 image_attention_mask[batch_idx][idx] = count seen_eod = False else: image_attention_mask[batch_idx][idx] = count if seen_eod: image_attention_mask[batch_idx][idx] = -1 if token_id == eod_token_id: seen_eod = True for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1): token_id = input_ids[batch_idx][idx] if token_id == image_token_id: count += 1 next_image_attention_mask[batch_idx][idx] = count seen_eod = False else: next_image_attention_mask[batch_idx][idx] = count if token_id == eod_token_id: seen_eod = True if seen_eod: next_image_attention_mask[batch_idx][idx] = -1 non_negative_indices = next_image_attention_mask[batch_idx] != -1 next_image_attention_mask[batch_idx][non_negative_indices] -= count next_image_attention_mask[batch_idx][non_negative_indices] *= -1 return image_attention_mask, next_image_attention_mask def is_url(string): """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url""" if " " in string: return False result = urlparse(string) return all([result.scheme, result.netloc]) class IdeficsProcessor(ProcessorMixin): r""" Constructs a IDEFICS processor which wraps a LLama tokenizer and IDEFICS image processor into a single processor. 
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information. Args: image_processor (`IdeficsImageProcessor`): An instance of [`IdeficsImageProcessor`]. The image processor is a required input. tokenizer (`LlamaTokenizerFast`): An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input. image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image) """ attributes = ["image_processor", "tokenizer"] image_processor_class = "IdeficsImageProcessor" tokenizer_class = "LlamaTokenizerFast" def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.default_image_dims = ( self.image_processor.image_num_channels, self.image_processor.image_size, self.image_processor.image_size, ) self.tokenizer_was_trained_with_end_of_utterance_token = ( True if "<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", []) else False ) def __call__( self, prompts: Union[List[TextInput], List[List[TextInput]]], padding: Union[bool, str, PaddingStrategy] = "longest", truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, transform: Callable = None, add_eos_token=False, add_end_of_utterance_token=None, debug=False, return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, ) -> BatchEncoding: """This method takes batched or non-batched prompts made of text and images and converts them into prompts that the model was trained on and prepares the image pixel values for the model to process. Args: prompts (`Union[List[TextInput], [List[List[TextInput]]]]`): either a single prompt or a batched list of prompts - see the detailed description immediately after the end of the arguments doc section. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `"longest"`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'`: No padding. This will raise an error if the input sequences are of different lengths. Note: Unlike most processors, which set padding=`False` by default, `IdeficsProcessor` sets `padding="longest"` by default. See https://github.com/huggingface/transformers/pull/29449#pullrequestreview-1925576061 for why. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*): Activates truncation to cut input sequences longer than `max_length` to `max_length`. transform (`Callable`, *optional*): A custom transform function that accepts a single image can be passed for training. For example, `torchvision.Compose` can be used to compose multiple functions. 
If `None`, a preset of inference-specific transforms will be applied to the images. add_eos_token (`bool`, *optional*, defaults to `False`): Adds `eos_token` at the end of the final prompt if `True`. add_end_of_utterance_token (`bool`, *optional*): Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an image). If `None`, the tokenizer will be checked instead and if this token is found in `additional_special_tokens` then the value will be `True`. debug (`bool`, *optional*, defaults to `False`): A `True` value will help debug prompt generation by dumping useful information. return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`): The type of tensors to return. Can be one of: - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. Returns: a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be directly passed to `model.generate`. Detailed explanation: Each entry in `prompts` is either a text to be passed as is or an image that will be processed. An image can be either an image object (`PIL.Image`) or a URL from which the image can be retrieved. When the processor encounters an image it'll inject a `<fake_token_around_image><image><fake_token_around_image>` entry into the prompt. Example: ```python checkpoint = "HuggingFaceM4/idefics-9b" processor = AutoProcessor.from_pretrained(checkpoint) url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg" img = processor.image_processor.fetch_images([url])[0] prompts = [ "User:", img, "Describe this image.\nAssistant: An image of two kittens in grass.\n", "User:", "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg", "Describe this image.\nAssistant:", ] inputs = processor(prompts, return_tensors="pt") generated_ids = model.generate(**inputs, max_length=100) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` In this example the `prompts` will be converted into: ``` <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image. Assistant: An image of two kittens in grass. User:<fake_token_around_image><image><fake_token_around_image>Describe this image. Assistant: ``` and the two images will be massaged using the [`IdeficsImageProcessor.__call__`] method and placed inside the `pixel_values` dict entry of the return value. This example also exemplifies that images can be passed as objects or as text URLs. It can be seen that the first image is passed as an object and the second one as a URL. To do training, do: ```python image_transform = transforms.Compose( [ transforms.RandomResizedCrop( (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize(mean=self.image_mean, std=self.image_std), ] ) inputs = processor(prompts, transform=image_transform, return_tensors="pt") ``` In order to help debug prompt generation, enable `debug=True`, which will show you what's happening.
""" # if the value isn't overriden by the user, check if the tokenizer was trained with this token and then use it if add_end_of_utterance_token is None: add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token # turn non-batched prompts into batched if not any(isinstance(i, list) for i in prompts): prompts = [prompts] fake_token = "<fake_token_around_image>" image_token = "<image>" end_of_utterance_token = "<end_of_utterance>" def image_tokens(last_was_image): if last_was_image: return image_token + fake_token else: return fake_token + image_token + fake_token all_prompts = [] all_images = [] for sample in prompts: # the model was trained on samples starting with <s> full_text = f"{self.tokenizer.bos_token}" # an image can either be an image object in the item or the url, everything else is a verbatim prompt text image_objects = [] last_was_image = False last_was_text = False for i, item in enumerate(sample): if i > 0: last_was_text = True if not last_was_image else False if isinstance(item, str): item = item.strip(" ") if is_url(item): image = self.image_processor.fetch_images(item) full_text += image_tokens(last_was_image) image_objects.append(image) last_was_image = True else: # we add end_of_utterance_token between each subsequent text prompts (but not at the last one!) if add_end_of_utterance_token and last_was_text: full_text += end_of_utterance_token full_text += item last_was_image = False else: # must be an image obj full_text += image_tokens(last_was_image) image_objects.append(item) last_was_image = True if add_eos_token: full_text += self.tokenizer.eos_token if debug is True: print(f"{full_text=}") image_objects = self.image_processor(image_objects, transform=transform) all_prompts.append(full_text) all_images.append(image_objects) text_encoding = self.tokenizer( text=all_prompts, add_special_tokens=False, padding=padding, truncation=truncation, max_length=max_length, ) all_texts = text_encoding["input_ids"] all_attention_masks = text_encoding["attention_mask"] # max_num_images has to be at least 1 even when there are no images max_num_images = max(len(x) for x in all_images) max_num_images = max(1, max_num_images) at_least_one_image = sum(len(x) for x in all_images) > 0 output_input_ids = [] output_images = [] output_attention_masks = [] for text, attention_mask, images in zip(all_texts, all_attention_masks, all_images): padded_input_ids = text image_count = padded_input_ids.count(self.image_token_id) local_max_num_images = min(image_count, max_num_images) current_images = images[:local_max_num_images] if len(current_images) > 0: padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:]) padded_image_tensor[: current_images.size(0)] = current_images else: padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims) output_images.append(padded_image_tensor) output_input_ids.append(torch.tensor(padded_input_ids)) output_attention_masks.append(torch.tensor(attention_mask)) output_input_ids = torch.stack(output_input_ids) output_images = torch.stack(output_images) output_attention_masks = torch.stack(output_attention_masks) if at_least_one_image: image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer) image_attention_mask = incremental_to_binary_attention_mask( image_attention_mask, num_classes=max_num_images ) else: # in full language mode we set the image mask to all-0s image_attention_mask = torch.zeros( output_input_ids.shape[0], output_input_ids.shape[1], 1, 
dtype=torch.bool ) return BatchFeature( data={ "input_ids": output_input_ids, "attention_mask": output_attention_masks, "pixel_values": output_images, "image_attention_mask": image_attention_mask, } ) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
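# A tiny sketch of the incremental -> binary image attention mask conversion used
# above: -1 means "attend to no image", 0 the first image, 1 the second, and so on.
# The example values are arbitrary.
if __name__ == "__main__":
    incremental = torch.tensor([[-1, 0, 0, 1, 1]])
    binary = incremental_to_binary_attention_mask(incremental, num_classes=2)
    print(binary.shape)  # torch.Size([1, 5, 2])
    print(binary[0].tolist())  # [[0, 0], [1, 0], [1, 0], [0, 1], [0, 1]]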
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/configuration_idefics.py
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Idefics model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class IdeficsVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`IdeficsModel`]. It is used to instantiate an Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Idefics-9B. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `embed_dim`) image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. intermediate_size (`int`, *optional*, defaults to 5120): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. image_num_channels (`int`, *optional*, defaults to `3`): Number of image channels. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing).
""" model_type = "idefics" attribute_map = { "hidden_size": "embed_dim", } def __init__( self, embed_dim=768, image_size=224, intermediate_size=5120, patch_size=14, num_hidden_layers=32, num_attention_heads=16, num_channels=3, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): self.embed_dim = embed_dim self.image_size = image_size self.intermediate_size = intermediate_size self.patch_size = patch_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act super().__init__(**kwargs) class IdeficsPerceiverConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Idefics-9B. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_resampler (`bool`, *optional*, defaults to `False`): Whether or not to use the resampler resampler_n_latents (`int`, *optional*, defaults to ): Number of latent embeddings to resample ("compress") the input sequence to (usually < 128). resampler_depth (`int`, *optional*, defaults to 6): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3). resampler_n_heads (`int`, *optional*, defaults to 16): Number of heads in each Transformer block (for multi-headed self-attention). resampler_head_dim (`int`, *optional*, defaults to 96): Dimensionality of each head projection in the Transformer block. qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`): Whether or not to use qk layer norms in perceiver """ model_type = "idefics" def __init__( self, use_resampler=False, resampler_n_latents=64, resampler_depth=6, resampler_n_heads=16, resampler_head_dim=96, qk_layer_norms_perceiver=False, **kwargs, ): self.use_resampler = use_resampler self.resampler_n_latents = resampler_n_latents self.resampler_depth = resampler_depth self.resampler_n_heads = resampler_n_heads self.resampler_head_dim = resampler_head_dim self.qk_layer_norms_perceiver = qk_layer_norms_perceiver super().__init__(**kwargs) class IdeficsConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Idefics-9B. e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: additional_vocab_size (`int`, *optional`, defaults to 0): Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens are always trainable whereas regular vocab tokens can be frozen or not. 
vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~IdeficsModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. alpha_initializer (`str`, *optional*, defaults to `"zeros"`): Initialization type for the alphas. alphas_initializer_range (`float`, *optional*, defaults to 0.0): The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross Attention. alpha_type (`str`, *optional*, defaults to `"float"`): Whether the gating alphas should be vectors or single floats. rms_norm_eps (`float`, *optional*, defaults to 1e-6): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0) Padding token id. bos_token_id (`int`, *optional*, defaults to 1) Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. tie_word_embeddings(`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings cross_layer_interval (`int`, *optional*, default to 1) Interval for cross attention (from text to image) layers. 
qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`): Exceptions to freezing text layers when `freeze_text_layers` is `True` freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`): Exceptions to freezing vision layers when `freeze_vision_layers` is `True` use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict Example: ```python >>> from transformers import IdeficsModel, IdeficsConfig >>> # Initializing a Idefics idefics-9b style configuration >>> configuration = IdeficsConfig() >>> # Initializing a model from the idefics-9b style configuration >>> model = IdeficsModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "idefics" is_composition = False def __init__( self, vocab_size=32000, additional_vocab_size=0, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, dropout=0.0, hidden_act="silu", initializer_range=0.02, alpha_initializer="zeros", alphas_initializer_range=0.0, alpha_type="float", rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, cross_layer_interval=1, qk_layer_norms=False, freeze_text_layers=True, freeze_text_module_exceptions=[], freeze_lm_head=False, freeze_vision_layers=True, freeze_vision_module_exceptions=[], use_resampler=False, vision_config=None, perceiver_config=None, **kwargs, ): self.vocab_size = vocab_size self.additional_vocab_size = additional_vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.hidden_act = hidden_act self.initializer_range = initializer_range self.alpha_initializer = alpha_initializer self.alphas_initializer_range = alphas_initializer_range self.alpha_type = alpha_type self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.cross_layer_interval = cross_layer_interval self.qk_layer_norms = qk_layer_norms self.freeze_vision_layers = freeze_vision_layers self.freeze_text_layers = freeze_text_layers self.freeze_text_module_exceptions = freeze_text_module_exceptions self.freeze_vision_module_exceptions = freeze_vision_module_exceptions self.freeze_lm_head = freeze_lm_head self.use_resampler = use_resampler if perceiver_config is None: self.perceiver_config = IdeficsPerceiverConfig() elif isinstance(perceiver_config, dict): self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config) elif isinstance(perceiver_config, IdeficsPerceiverConfig): self.perceiver_config = perceiver_config if vision_config is None: self.vision_config = IdeficsVisionConfig() elif isinstance(vision_config, dict): self.vision_config = IdeficsVisionConfig(**vision_config) elif isinstance(vision_config, IdeficsVisionConfig): self.vision_config = vision_config super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, 
tie_word_embeddings=tie_word_embeddings, **kwargs, ) # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since # PretrainedConfig.from_dict first instantiates the class with the config dict and only then # updates the config object with `kwargs` from from_pretrained, so during the instantiation # of this object many attributes have default values and haven't yet been overridden. # Do any required checks inside `from_pretrained` after the superclass's `from_pretrained` has run.
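A minimal usage sketch (editor's addition, not part of the original file): it illustrates how the constructor above normalizes `vision_config` and `perceiver_config` given as plain dicts into sub-config objects, and which defaults are created when they are omitted. Only `IdeficsConfig` is assumed to be importable from the top-level `transformers` package.

```python
from transformers import IdeficsConfig

# Sub-configs are created automatically when not supplied.
default_config = IdeficsConfig()
print(type(default_config.vision_config).__name__)          # IdeficsVisionConfig
print(default_config.perceiver_config.resampler_n_latents)  # 64

# Nested configs may also be passed as plain dicts; the constructor
# converts them into the corresponding config objects.
config = IdeficsConfig(
    use_resampler=True,
    vision_config={"embed_dim": 1280, "patch_size": 14},
    perceiver_config={"use_resampler": True, "resampler_depth": 3},
)
assert config.vision_config.embed_dim == 1280
assert config.perceiver_config.resampler_depth == 3
```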
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/vision.py
# coding=utf-8 # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...utils import ModelOutput, logging from .configuration_idefics import IdeficsVisionConfig logger = logging.get_logger(__name__) @dataclass class IdeficsVisionModelOutput(ModelOutput): """ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. Args: image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None # Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings class IdeficsVisionEmbeddings(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) # Heavily inspired from https://github.com/huggingface/transformers/blob/v4.33.0/src/transformers/models/vit/modeling_vit.py#L82 def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] - 1 pos_embed = self.position_embedding(self.position_ids) num_positions = pos_embed.shape[1] - 1 if num_patches == num_positions and height == width: return pos_embed class_pos_embed = pos_embed[:, 0] patch_pos_embed = pos_embed[:, 1:] embed_dim = embeddings.shape[-1] num_h_patches = height // self.config.patch_size num_w_patches = width // self.config.patch_size # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1 sqrt_num_positions = math.sqrt(num_positions) patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16 if fp32_upcasting: logger.warning_once( "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate " "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead." 
) patch_pos_embed = patch_pos_embed.to(torch.float) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions), mode="bicubic", align_corners=False, ) if fp32_upcasting: patch_pos_embed = patch_pos_embed.to(torch.bfloat16) if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]: raise ValueError( f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the " f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})" ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size or width != self.image_size: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`" ) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision class IdeficsVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision class IdeficsVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision class IdeficsVisionEncoderLayer(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IdeficsVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = IdeficsVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision class IdeficsVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`IdeficsVisionEncoderLayer`]. 
Args: config: IdeficsVisionConfig """ def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer class IdeficsVisionTransformer(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = IdeficsVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = IdeficsVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/idefics/modeling_idefics.py
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Idefics model.""" from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ... import PreTrainedModel from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import ModelOutput from ...modeling_utils import PretrainedConfig from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_idefics import IdeficsConfig from .perceiver import IdeficsPerceiverResampler from .vision import IdeficsVisionTransformer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "IdeficsConfig" from ..deprecated._archive_maps import IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass class IdeficsBaseModelOutputWithPast(ModelOutput): """ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class IdeficsCausalLMOutputWithPast(ModelOutput): """ Base class for Idefics causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. 
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None def expand_inputs_for_generation( input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs, ): expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None) model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None) model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None) model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if model_kwargs["image_attention_mask"] is not None: model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select( 0, expanded_return_idx ) if model_kwargs["pixel_values"] is not None: model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx) elif model_kwargs["image_encoder_embeddings"] is not None: model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select( 0, expanded_return_idx ) elif model_kwargs["perceiver_embeddings"] is not None: model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select( 0, expanded_return_idx ) return input_ids, model_kwargs def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: input_ids = input_ids[:, -1].unsqueeze(-1) if token_type_ids is not None: token_type_ids = token_type_ids[:, -1].unsqueeze(-1) attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) pixel_values = kwargs.get("pixel_values", None) image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None) perceiver_embeddings = kwargs.get("perceiver_embeddings", None) image_attention_mask = kwargs.get("image_attention_mask", None) interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False) return { "input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "pixel_values": pixel_values, "image_encoder_embeddings": image_encoder_embeddings, "perceiver_embeddings": perceiver_embeddings, "image_attention_mask": image_attention_mask, "interpolate_pos_encoding": interpolate_pos_encoding, } def freeze_model(model, 
module_exceptions=[]): mapping = { "LayerNorm": nn.LayerNorm, "Linear": nn.Linear, "Embedding": nn.Embedding, } module_exceptions_mapped = [mapping[m] for m in module_exceptions] for module in model.modules(): if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped): module.requires_grad_(True) # Explicitely setting it to true to avoid any mistakes else: module.requires_grad_(False) return model class IdeficsDecoupledEmbedding(nn.Embedding): # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding """ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0, then it will create `num_additional_embeddings` additional parameters that are always trained. If `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`. """ def __init__( self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool] = False, device=None, dtype=None, padding_idx=None, **kwargs, ) -> None: """ Args: num_embeddings (`int`): Size of the dictionary of embeddings num_additional_embeddings (`int`): Number of additional embeddings. Only useful when you `partially_freeze=True`. embedding_dim (`int`): The size of each embedding vector partially_freeze: (`bool`, *optional*, defaults to `False`): If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen. padding_idx (`int`, *optional*): The padding index (needs to be less than num_embeddings) Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`, `max_norm` or `norm_type`. We are not supporting these. """ if padding_idx is not None and padding_idx > num_embeddings: raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}") super().__init__( num_embeddings=num_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, padding_idx=padding_idx, **kwargs, ) self.num_embeddings = num_embeddings self.padding_idx = padding_idx self.num_additional_embeddings = num_additional_embeddings self.partially_freeze = partially_freeze if partially_freeze: self.weight.requires_grad_(False) if self.num_additional_embeddings > 0: self.additional_embedding = nn.Embedding( num_embeddings=self.num_additional_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, ) def forward(self, input_ids): """ we have 2 embeddings, with different indices - one pretrained self.weight and another self.additional_embedding.weight that is being trained. in order to make a lookup of the input ids, we: 1. find out the indices of the entries belonging to the 2nd embedding 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd embedding starts from 0 and not num_embeddings 3. perform the 2nd embedding lookup 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index 5. perform the 1st embedding lookup 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices - i.e. 
not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are usually relatively short it's probably not faster or if faster not by much - but might be a good idea to measure. """ if self.num_additional_embeddings == 0: return F.embedding(input_ids, self.weight) # Clone so that we don't modify the original input_ids later on input_ids = input_ids.clone() additional_vocab_indices = torch.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = input_ids[additional_vocab_indices] additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) # for successful lookup replace input_ids with 0, the results of these will be discarded anyway input_ids[additional_vocab_indices] = 0 full_vector = F.embedding(input_ids, self.weight) # overwrite the records with high indices full_vector[additional_vocab_indices] = additional_embeddings return full_vector def extra_repr(self) -> str: return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format( self.num_embeddings, self.num_additional_embeddings, self.embedding_dim, self.partially_freeze, ) class IdeficsDecoupledLinear(nn.Linear): # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear """ Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0, then it will create `out_additional_features * in_features` additional parameters that are always trained. If `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`. """ def __init__( self, in_features: int, out_features: int, out_additional_features: int = 0, bias: bool = True, partially_freeze: bool = True, device=None, dtype=None, ) -> None: """ out_additional_features: int. Number of additional trainable dimensions. Only makes sense when `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be trainable. If False, default to the regular behavior of nn.Linear. 
""" super().__init__(in_features, out_features, bias, device, dtype) self.out_additional_features = out_additional_features self.partially_freeze = partially_freeze self.in_features = in_features self.out_features = out_features if partially_freeze: self.weight.requires_grad_(False) if bias: self.bias.requires_grad_(False) if out_additional_features > 0: self.additional_fc = nn.Linear( in_features=in_features, out_features=out_additional_features, bias=bias, device=device, dtype=dtype, ) def forward(self, input: torch.Tensor) -> torch.Tensor: output = F.linear(input, self.weight, self.bias) if self.out_additional_features > 0: additional_features = self.additional_fc(input) output = torch.cat((output, additional_features), -1) return output def extra_repr(self) -> str: """Overwriting `nn.Linear.extra_repr` to include new parameters.""" return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format( self.in_features, self.out_features, self.out_additional_features, self.bias is not None, self.partially_freeze, ) # this was adapted from LlamaRMSNorm class IdeficsRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ IdeficsRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm) # this was adapted from LlamaRotaryEmbedding class IdeficsEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # this was adapted from LlamaMLP class IdeficsMLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) # this was adapted from LlamaAttention class IdeficsAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, hidden_size: int, num_heads: int, dropout: float = 0.0, is_cross_attention: bool = False, config: PretrainedConfig = None, qk_layer_norms: bool = False, ): super().__init__() self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = hidden_size // num_heads self.dropout = dropout self.is_causal = True if (self.head_dim * num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {num_heads})." ) self.is_cross_attention = is_cross_attention if not hasattr(nn.functional, "scaled_dot_product_attention"): raise ValueError("this model requires pytorch 2.0 or higher") if self.is_cross_attention: kv_input_dim = ( self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim ) self.q_proj = nn.Linear( self.hidden_size, num_heads * self.head_dim, bias=False, ) self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear( kv_input_dim, num_heads * self.head_dim, bias=False, ) else: self.q_proj = nn.Linear( self.hidden_size, num_heads * self.head_dim, bias=False, ) self.k_proj = nn.Linear( self.hidden_size, num_heads * self.head_dim, bias=False, ) self.v_proj = nn.Linear( self.hidden_size, num_heads * self.head_dim, bias=False, ) self.o_proj = nn.Linear( num_heads * self.head_dim, hidden_size, bias=False, ) self.rotary_emb = IdeficsEmbedding(self.head_dim) self.qk_layer_norms = qk_layer_norms if self.qk_layer_norms: self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # if key_value_states are provided this layer is used as a cross-attention layer is_cross_attention = self.is_cross_attention or key_value_states is not None bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) if not is_cross_attention: key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = 
self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) else: _, kv_len, _ = key_value_states.size() # Note that, in this case, `kv_len` == `kv_seq_len` key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = ( self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) ) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if not is_cross_attention: cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len)) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if self.qk_layer_norms: query_states = self.q_layer_norm(query_states) key_states = self.k_layer_norm(key_states) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal=self.is_causal and attention_mask is None and q_len > 1, ) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) attn_weights = None if output_attentions: logger.warning_once( "attn_weights are not extracted in scaled_dot_product_attention. 
The model returns None instead" ) return attn_output, attn_weights, past_key_value # this was adapted from LlamaDecoderLayer class IdeficsDecoderLayer(nn.Module): def __init__(self, config: IdeficsConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = IdeficsAttention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, dropout=config.dropout, config=config, ) self.mlp = IdeficsMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.dropout = config.dropout def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class IdeficsGatedCrossAttentionLayer(nn.Module): def __init__(self, config: IdeficsConfig): super().__init__() self.hidden_size = config.hidden_size self.cross_attn = IdeficsAttention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms, ) self.mlp = IdeficsMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.config = config.dropout self.act_cross_attn = nn.Tanh() self.act_dense = nn.Tanh() if config.alpha_initializer == "zeros": if config.alpha_type == "vector": 
self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) elif config.alpha_type == "float": self.alpha_cross_attn = nn.Parameter(torch.zeros(1)) self.alpha_dense = nn.Parameter(torch.zeros(1)) else: raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") elif config.alpha_initializer == "ones": if config.alpha_type == "vector": self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size)) self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size)) elif config.alpha_type == "float": self.alpha_cross_attn = nn.Parameter(torch.ones(1)) self.alpha_dense = nn.Parameter(torch.ones(1)) else: raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") elif config.alpha_initializer in {"normal", "gaussian", "random"}: if config.alpha_type == "vector": self.alpha_cross_attn = nn.Parameter( torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)) ) self.alpha_dense = nn.Parameter( torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size)) ) elif config.alpha_type == "float": self.alpha_cross_attn = nn.Parameter( torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1)) ) self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1))) else: raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") else: raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!") if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")): raise ValueError("Alpha parameters not initialized correctly!") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_hidden_states: Optional[torch.Tensor] = None, image_attention_mask: Optional[torch.Tensor] = None, cross_attention_gate: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, past_key_value: Optional[Tuple[torch.Tensor]] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. cross_attention_gate (`torch.FloatTensor`, *optional*): gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ if image_hidden_states is None: raise ValueError( "`image_hidden_states` is required for Idefics cross attention module which are visual features to be" " conditioned on." 
) if cross_attention_gate is None: raise ValueError( "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images." ) if past_key_value is not None: raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.") residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.cross_attn( hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) # Fill in zeros for cross_attention hidden_states of tokens attending to no images hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0) hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`IdeficsConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class IdeficsPreTrainedModel(PreTrainedModel): config_class = IdeficsConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] _supports_sdpa = True def _init_weights(self, module): # important: this ported version of Idefics isn't meant for training from scratch - only # inference and fine-tuning - so the proper init weights code has been removed - the m4 code # base should be used for training from scratch and it contains the correct code. 
std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig: # We remove the checks on `is_torch_sdpa_available()` and `cls._supports_sdpa` as Falcon supports SDPA from torch==2.0.0 (no requirement on 2.1). _is_bettertransformer = getattr(cls, "use_bettertransformer", False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = "sdpa" return config LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class IdeficsModel(IdeficsPreTrainedModel): """ Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`] Args: config: IdeficsConfig """ def __init__(self, config: IdeficsConfig): super().__init__(config) self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = IdeficsDecoupledEmbedding( num_embeddings=config.vocab_size, num_additional_embeddings=config.additional_vocab_size, embedding_dim=config.hidden_size, partially_freeze=config.freeze_text_layers, padding_idx=self.padding_idx, ) self.image_size = config.vision_config.image_size self.vision_config = config.vision_config self.vision_model = IdeficsVisionTransformer(config.vision_config) # Perceiver Resampler if config.use_resampler: perceiver_config = config.perceiver_config self.perceiver_resampler = IdeficsPerceiverResampler( config, config.vision_config.embed_dim, perceiver_config.resampler_depth, perceiver_config.resampler_n_heads, perceiver_config.resampler_head_dim, perceiver_config.resampler_n_latents, ) self.layers = nn.ModuleList([IdeficsDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.cross_layer_interval = config.cross_layer_interval num_cross_layers = config.num_hidden_layers // self.cross_layer_interval self.gated_cross_attn_layers = nn.ModuleList( [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)] ) self.gradient_checkpointing = False self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) # Initialize weights and apply final processing self.post_init() self.freeze_relevant_params(config) def freeze_relevant_params(self, config=None): if config is None: config = self.config if config.freeze_text_layers: self.freeze_text_layers(config.freeze_text_module_exceptions) if config.freeze_vision_layers: freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions) def freeze_text_layers(self, module_exceptions=[]): for module in [self.layers, self.norm]: freeze_model(module, module_exceptions=module_exceptions) def freeze_vision_layers(self, module_exceptions=[]): freeze_model(self.vision_model, module_exceptions=module_exceptions) def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: 
Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, image_encoder_embeddings: Optional[torch.FloatTensor] = None, perceiver_embeddings: Optional[torch.FloatTensor] = None, image_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]: device = input_ids.device if input_ids is not None else inputs_embeds.device output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) elif position_ids is None: position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0) if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2: raise ValueError( "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None." 
) elif pixel_values is not None: pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility batch_size, num_images = pixel_values.shape[:2] pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding ).last_hidden_state elif image_encoder_embeddings is not None: batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size() image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device) image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) if self.config.use_resampler: if perceiver_embeddings is None: perceiver_embeddings = self.perceiver_resampler(image_hidden_states) image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2) else: batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size() image_hidden_states = perceiver_embeddings elif perceiver_embeddings is None: image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2) else: raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True") image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size) # # Hack to use the model in full language modeling mode # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device) # Make image_attention_mask compatible with hidden states text_seq_len = image_attention_mask.size(1) image_attention_mask = image_attention_mask.unsqueeze(-1) image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len) image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len) if image_hidden_states is not None: image_batch_size, image_sequence_length, _ = image_hidden_states.size() image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = torch.ones(image_hidden_shape, device=device) image_attention_mask = self.invert_attention_mask(image_attention_mask) else: image_attention_mask = None # cross_attention_gate: # For any tokens attending to no images, the hidden_states comming out of the cross-attention should be zeroed-out. # `image_attention_mask` has shape [bsz, 1, num_images, hidden_size] with elements equal to either 0.0 or a very negative number. # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0. # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0. cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to( device ) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None def vblock( main_block, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, layer_idx, cross_layer_interval, gated_cross_attn_layers, ): # TODO(ls): Add cross attention values to respective lists if layer_idx % cross_layer_interval == 0: xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] outputs = xblock( hidden_states, attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None, # not implemented ) hidden_states = outputs[0] layer_outputs = main_block( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) return layer_outputs if self.gradient_checkpointing and self.training: past_key_value = None if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( vblock, decoder_layer, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, idx, self.cross_layer_interval, self.gated_cross_attn_layers, ) else: layer_outputs = vblock( decoder_layer, hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size) if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states] if v is not None ) return IdeficsBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, image_hidden_states=image_hidden_states, ) class IdeficsForVisionText2Text(IdeficsPreTrainedModel): _keys_to_ignore_on_load_missing = [r"lm_head.weight"] _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] def __init__(self, config, vision_model=None): super().__init__(config) self.model = IdeficsModel(config) self.lm_head = IdeficsDecoupledLinear( in_features=config.hidden_size, out_features=config.vocab_size, 
out_additional_features=config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head, ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of IdeficsDecoupledLinear and IdeficsDecoupledEmbedding. """ output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, "tie_word_embeddings", True): output_embeddings.weight = input_embeddings.weight if input_embeddings.num_additional_embeddings > 0: assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings if hasattr(output_embeddings, "out_additional_features") and hasattr( input_embeddings, "num_additional_embeddings" ): output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, image_encoder_embeddings: Optional[torch.FloatTensor] = None, perceiver_embeddings: Optional[torch.FloatTensor] = None, image_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoProcessor, IdeficsForVisionText2Text >>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b") >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b") >>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg" >>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg" >>> prompts = [ ... [ ... "User:", ... dogs_image_url_1, ... "Describe this image.\nAssistant: An image of two dogs.\n", ... "User:", ... dogs_image_url_2, ... "Describe this image.\nAssistant:", ... ] ... 
] >>> inputs = processor(prompts, return_tensors="pt") >>> generate_ids = model.generate(**inputs, max_new_tokens=6) >>> processor.batch_decode(generate_ids, skip_special_tokens=True) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return IdeficsCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): image_hidden_states = kwargs.pop("image_hidden_states", None) if image_hidden_states is not None: if self.config.use_resampler: kwargs["perceiver_embeddings"] = image_hidden_states else: kwargs["image_encoder_embeddings"] = image_hidden_states kwargs["pixel_values"] = None inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ["token_type_ids"] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation( *args, **model_kwargs, ): return expand_inputs_for_generation(*args, **model_kwargs) def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False, standardize_cache_format: bool = False, ) -> Dict[str, Any]: model_kwargs = super()._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder, standardize_cache_format, ) if "image_attention_mask" in model_kwargs: image_attention_mask = model_kwargs["image_attention_mask"] last_mask = image_attention_mask[:, -1, :].unsqueeze(1) model_kwargs["image_attention_mask"] = last_mask # Get the precomputed image_hidden_states model_kwargs["image_hidden_states"] = outputs.image_hidden_states return model_kwargs @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for 
past_state in layer_past),) return reordered_past
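The interleaving of gated cross-attention blocks with the regular decoder layers (the `idx % cross_layer_interval` dispatch inside `vblock` above, together with `num_cross_layers = num_hidden_layers // cross_layer_interval` in `IdeficsModel.__init__`) is easier to see with concrete numbers. A minimal sketch, using illustrative layer counts that are assumptions and not tied to any particular checkpoint:

```python
# Sketch only: mirrors the dispatch in `vblock` above.
# `num_hidden_layers` and `cross_layer_interval` are illustrative assumptions.
num_hidden_layers = 32
cross_layer_interval = 4

num_cross_layers = num_hidden_layers // cross_layer_interval  # as in IdeficsModel.__init__ -> 8 gated blocks
for idx in range(num_hidden_layers):
    if idx % cross_layer_interval == 0:
        # every `cross_layer_interval`-th decoder layer is preceded by a gated cross-attention block
        print(f"decoder layer {idx:2d} <- gated_cross_attn_layers[{idx // cross_layer_interval}]")
```

With these values, layers 0, 4, 8, ..., 28 each run a gated cross-attention block (conditioned on the image hidden states and zeroed out by `cross_attention_gate` for tokens attending to no image) before the usual self-attention decoder layer.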
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py
# coding=utf-8 # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio Spectogram Transformer (AST) model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class ASTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the AST [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. frequency_stride (`int`, *optional*, defaults to 10): Frequency stride to use when patchifying the spectrograms. time_stride (`int`, *optional*, defaults to 10): Temporal stride to use when patchifying the spectrograms. max_length (`int`, *optional*, defaults to 1024): Temporal dimension of the spectrograms. num_mel_bins (`int`, *optional*, defaults to 128): Frequency dimension of the spectrograms (number of Mel-frequency bins). 
Example: ```python >>> from transformers import ASTConfig, ASTModel >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration >>> configuration = ASTConfig() >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration >>> model = ASTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "audio-spectrogram-transformer" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.patch_size = patch_size self.qkv_bias = qkv_bias self.frequency_stride = frequency_stride self.time_stride = time_stride self.max_length = max_length self.num_mel_bins = num_mel_bins
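The patchification parameters above fully determine the encoder's sequence length. A small worked sketch using the documented `ASTConfig` defaults; the arithmetic is the standard convolution output-size formula also used by `ASTEmbeddings.get_shape` in the modeling file below, and it reproduces the 1214-token sequence length quoted there:

```python
from transformers import ASTConfig

config = ASTConfig()  # patch_size=16, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128

# Convolution output-size arithmetic, as in ASTEmbeddings.get_shape
frequency_out = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1  # (128 - 16) // 10 + 1 = 12
time_out = (config.max_length - config.patch_size) // config.time_stride + 1              # (1024 - 16) // 10 + 1 = 101
num_patches = frequency_out * time_out                                                     # 12 * 101 = 1212

# Plus the CLS and distillation tokens prepended by ASTEmbeddings -> sequence length 1214
print(num_patches + 2)
```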
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py
# coding=utf-8 # Copyright 2022 MIT and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Audio Spectrogram Transformer (AST) model.""" import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_audio_spectrogram_transformer import ASTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ASTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "MIT/ast-finetuned-audioset-10-10-0.4593" _EXPECTED_OUTPUT_SHAPE = [1, 1214, 768] # Audio classification docstring _SEQ_CLASS_CHECKPOINT = "MIT/ast-finetuned-audioset-10-10-0.4593" _SEQ_CLASS_EXPECTED_OUTPUT = "'Speech'" _SEQ_CLASS_EXPECTED_LOSS = 0.17 from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 class ASTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config: ASTConfig) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = ASTPatchEmbeddings(config) frequency_out_dimension, time_out_dimension = self.get_shape(config) num_patches = frequency_out_dimension * time_out_dimension self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def get_shape(self, config): # see Karpathy's cs231n blog on how to calculate the output dimensions # https://cs231n.github.io/convolutional-networks/#conv frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1 time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1 return frequency_out_dimension, time_out_dimension def forward(self, input_values: torch.Tensor) -> torch.Tensor: batch_size = input_values.shape[0] embeddings = self.patch_embeddings(input_values) cls_tokens = self.cls_token.expand(batch_size, -1, -1) distillation_tokens = self.distillation_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1) embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class ASTPatchEmbeddings(nn.Module): """ This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() patch_size = config.patch_size frequency_stride = config.frequency_stride time_stride = config.time_stride self.projection = nn.Conv2d( 1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride) ) def forward(self, input_values: torch.Tensor) -> torch.Tensor: input_values = input_values.unsqueeze(1) input_values = input_values.transpose(2, 3) embeddings = self.projection(input_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->AST class ASTSelfAttention(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->AST class ASTSelfOutput(nn.Module): """ The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: ASTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->AST class ASTAttention(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.attention = ASTSelfAttention(config) self.output = ASTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->AST class ASTIntermediate(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->AST class ASTOutput(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->AST class ASTLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ASTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ASTAttention(config) self.intermediate = ASTIntermediate(config) self.output = ASTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after 
= nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in AST, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in AST, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->AST class ASTEncoder(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class ASTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ASTConfig base_model_prefix = "audio_spectrogram_transformer" main_input_name = "input_values" supports_gradient_checkpointing = True # Copied from transformers.models.deit.modeling_deit.DeiTPreTrainedModel._init_weights def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ASTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`): Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~ASTFeatureExtractor.__call__`] head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare AST Model transformer outputting raw hidden-states without any specific head on top.", AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING, ) class ASTModel(ASTPreTrainedModel): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.config = config self.embeddings = ASTEmbeddings(config) self.encoder = ASTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> ASTPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_values is None: raise ValueError("You have to specify input_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_values) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2 if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class ASTMLPHead(nn.Module): def __init__(self, config: ASTConfig): super().__init__() self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() def forward(self, hidden_state): hidden_state = self.layernorm(hidden_state) hidden_state = self.dense(hidden_state) return hidden_state @add_start_docstrings( """ Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2. 
""", AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING, ) class ASTForAudioClassification(ASTPreTrainedModel): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.audio_spectrogram_transformer = ASTModel(config) # Classifier head self.classifier = ASTMLPHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_SEQ_CLASS_CHECKPOINT, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the audio classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.audio_spectrogram_transformer( input_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
mavonic_private_repos/transformers/src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_audio_spectrogram_transformer_config(model_name): config = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: config.max_length = 128 elif "12-12" in model_name: config.time_stride = 12 config.frequency_stride = 12 elif "14-14" in model_name: config.time_stride = 14 config.frequency_stride = 14 elif "16-16" in model_name: config.time_stride = 16 config.frequency_stride = 16 else: raise ValueError("Model not supported") repo_id = "huggingface/label-files" if "speech-commands" in model_name: config.num_labels = 35 filename = "speech-commands-v2-id2label.json" else: config.num_labels = 527 filename = "audioset-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(name): if "module.v" in name: name = name.replace("module.v", "audio_spectrogram_transformer") if "cls_token" in name: name = name.replace("cls_token", "embeddings.cls_token") if "dist_token" in name: name = name.replace("dist_token", "embeddings.distillation_token") if "pos_embed" in name: name = name.replace("pos_embed", "embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") # transformer blocks if "blocks" in name: name = name.replace("blocks", "encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") # final layernorm if "audio_spectrogram_transformer.norm" in name: name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm") # classifier head if "module.mlp_head.0" in name: name = name.replace("module.mlp_head.0", "classifier.layernorm") if "module.mlp_head.1" in name: name = name.replace("module.mlp_head.1", "classifier.dense") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = 
int(key_split[3]) dim = config.hidden_size if "weight" in key: orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight" ] = val[:dim, :] orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight" ] = val[dim : dim * 2, :] orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight" ] = val[-dim:, :] else: orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias" ] = val[:dim] orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias" ] = val[dim : dim * 2] orig_state_dict[ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias" ] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict def remove_keys(state_dict): ignore_keys = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(k, None) @torch.no_grad() def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure. """ config = get_audio_spectrogram_transformer_config(model_name) model_name_to_url = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") # remove some keys remove_keys(state_dict) # rename some keys new_state_dict = convert_state_dict(state_dict, config) # load 🤗 model model = ASTForAudioClassification(config) model.eval() model.load_state_dict(new_state_dict) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 mean = -4.2677393 if "speech-commands" not in model_name else -6.845978 std = 4.5689974 if "speech-commands" not in model_name else 5.5654526 max_length = 1024 if "speech-commands" not in model_name else 128 feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length) if "speech-commands" in model_name: dataset = load_dataset("speech_commands", "v0.02", split="validation") waveform = dataset[0]["audio"]["array"] else: filepath = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", ) waveform, _ = torchaudio.load(filepath) waveform = waveform.squeeze().numpy() inputs = 
feature_extractor(waveform, sampling_rate=16000, return_tensors="pt") # forward pass outputs = model(**inputs) logits = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]) elif model_name == "ast-finetuned-audioset-10-10-0.450": expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718]) elif model_name == "ast-finetuned-audioset-10-10-0.448": expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344]) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917]) elif model_name == "ast-finetuned-audioset-12-12-0.447": expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843]) elif model_name == "ast-finetuned-audioset-14-14-0.443": expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413]) elif model_name == "ast-finetuned-audioset-16-16-0.442": expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470]) elif model_name == "ast-finetuned-speech-commands-v2": expected_slice = torch.tensor([6.1589, -8.0566, -8.7984]) else: raise ValueError("Unknown model name") if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4): raise ValueError("Logits don't match") print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving feature extractor to {pytorch_dump_folder_path}") feature_extractor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model and feature extractor to the hub...") model.push_to_hub(f"MIT/{model_name}") feature_extractor.push_to_hub(f"MIT/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="ast-finetuned-audioset-10-10-0.4593", type=str, help="Name of the Audio Spectrogram Transformer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
mavonic_private_repos/transformers/src/transformers/models/audio_spectrogram_transformer/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ], "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_audio_spectrogram_transformer"] = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
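# --- Minimal illustration (not part of the package file above) ----------------
# The _LazyModule indirection above resolves submodules only on first attribute
# access, so the public import surface behaves like an eager package:
from transformers.models.audio_spectrogram_transformer import ASTConfig

config = ASTConfig()
print(config.hidden_size)  # 768 by default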
mavonic_private_repos/transformers/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extractor class for Audio Spectrogram Transformer. """ from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, is_speech_available, is_torch_available, logging if is_speech_available(): import torchaudio.compliance.kaldi as ta_kaldi if is_torch_available(): import torch logger = logging.get_logger(__name__) class ASTFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Audio Spectrogram Transformer (AST) feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). num_mel_bins (`int`, *optional*, defaults to 128): Number of Mel-frequency bins. max_length (`int`, *optional*, defaults to 1024): Maximum length to which to pad/truncate the extracted features. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the log-Mel features using `mean` and `std`. mean (`float`, *optional*, defaults to -4.2677393): The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default. std (`float`, *optional*, defaults to 4.5689974): The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation by default. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`. 
""" model_input_names = ["input_values", "attention_mask"] def __init__( self, feature_size=1, sampling_rate=16000, num_mel_bins=128, max_length=1024, padding_value=0.0, do_normalize=True, mean=-4.2677393, std=4.5689974, return_attention_mask=False, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.num_mel_bins = num_mel_bins self.max_length = max_length self.do_normalize = do_normalize self.mean = mean self.std = std self.return_attention_mask = return_attention_mask if not is_speech_available(): mel_filters = mel_filter_bank( num_frequency_bins=256, num_mel_filters=self.num_mel_bins, min_frequency=20, max_frequency=sampling_rate // 2, sampling_rate=sampling_rate, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) self.window = window_function(400, "hann", periodic=False) def _extract_fbank_features( self, waveform: np.ndarray, max_length: int, ) -> np.ndarray: """ Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized before feature extraction. """ # waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers if is_speech_available(): waveform = torch.from_numpy(waveform).unsqueeze(0) fbank = ta_kaldi.fbank( waveform, sample_frequency=self.sampling_rate, window_type="hanning", num_mel_bins=self.num_mel_bins, ) else: waveform = np.squeeze(waveform) fbank = spectrogram( waveform, self.window, frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, preemphasis=0.97, mel_filters=self.mel_filters, log_mel="log", mel_floor=1.192092955078125e-07, remove_dc_offset=True, ).T fbank = torch.from_numpy(fbank) n_frames = fbank.shape[0] difference = max_length - n_frames # pad or truncate, depending on difference if difference > 0: pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference)) fbank = pad_module(fbank) elif difference < 0: fbank = fbank[0:max_length, :] fbank = fbank.numpy() return fbank def normalize(self, input_values: np.ndarray) -> np.ndarray: return (input_values - (self.mean)) / (self.std * 2) def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [raw_speech] # extract fbank features and pad/truncate to max_length features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech] # convert into BatchFeature padded_inputs = BatchFeature({"input_values": features}) # make sure list is in array format input_values = padded_inputs.get("input_values") if isinstance(input_values[0], list): padded_inputs["input_values"] = [np.asarray(feature, dtype=np.float32) for feature in input_values] # normalization if self.do_normalize: padded_inputs["input_values"] = [self.normalize(feature) for feature in input_values] if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
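# --- Quick usage sketch (illustrative input, not part of the class above) -----
# With the defaults (max_length=1024, num_mel_bins=128, AudioSet mean/std), any
# mono waveform is turned into padded/truncated log-mel features of shape
# (batch, 1024, 128).
import numpy as np

from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # dummy 1 s of 16 kHz audio

inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)  # expected: (1, 1024, 128)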
mavonic_private_repos/transformers/src/transformers/models/donut/image_processing_donut.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Donut.""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, pad, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, logging from ...utils.import_utils import is_vision_available logger = logging.get_logger(__name__) if is_vision_available(): import PIL class DonutImageProcessor(BaseImageProcessor): r""" Constructs a Donut image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a random amont of padding on each size, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. 
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Image standard deviation. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_thumbnail: bool = True, do_align_long_axis: bool = False, do_pad: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 2560, "width": 1920} if isinstance(size, (tuple, list)): # The previous feature extractor size parameter was in (width, height) format size = size[::-1] size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.do_thumbnail = do_thumbnail self.do_align_long_axis = do_align_long_axis self.do_pad = do_pad self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self._valid_processor_keys = [ "images", "do_resize", "size", "resample", "do_thumbnail", "do_align_long_axis", "do_pad", "random_padding", "do_rescale", "rescale_factor", "do_normalize", "image_mean", "image_std", "return_tensors", "data_format", "input_data_format", ] def align_long_axis( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Align the long axis of the image to the longest axis of the specified size. Args: image (`np.ndarray`): The image to be aligned. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. Returns: `np.ndarray`: The aligned image. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] if (output_width < output_height and input_width > input_height) or ( output_width > output_height and input_width < input_height ): image = np.rot90(image, 3) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def pad_image( self, image: np.ndarray, size: Dict[str, int], random_padding: bool = False, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad the image to the specified size. Args: image (`np.ndarray`): The image to be padded. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. random_padding (`bool`, *optional*, defaults to `False`): Whether to use random padding or not. data_format (`str` or `ChannelDimension`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. 
If not provided, it will be inferred. """ output_height, output_width = size["height"], size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) delta_width = output_width - input_width delta_height = output_height - input_height if random_padding: pad_top = np.random.randint(low=0, high=delta_height + 1) pad_left = np.random.randint(low=0, high=delta_width + 1) else: pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = ((pad_top, pad_bottom), (pad_left, pad_right)) return pad(image, padding, data_format=data_format, input_data_format=input_data_format) def pad(self, *args, **kwargs): logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.") return self.pad_image(*args, **kwargs) def thumbnail( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`np.ndarray`): The image to be resized. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use. data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = size["height"], size["width"] # We always resize to the smallest of either the input or output size. height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return resize( image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" size = get_size_dict(size) shortest_edge = min(size["height"], size["width"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) resized_image = resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return resized_image def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_thumbnail: bool = None, do_align_long_axis: bool = None, do_pad: bool = None, random_padding: bool = False, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], size["width"]) with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random amont of padding on each size, up to the largest image size in the batch. Otherwise, all images are padded to the largest image size in the batch. random_padding (`bool`, *optional*, defaults to `self.random_padding`): Whether to use random padding when padding the image. If `True`, each image in the batch with be padded with a random amount of padding on each side up to the size of the largest image in the batch. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. 
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size if isinstance(size, (tuple, list)): # Previous feature extractor had size in (width, height) format size = size[::-1] size = get_size_dict(size) resample = resample if resample is not None else self.resample do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_align_long_axis: images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_thumbnail: images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] if do_pad: images = [ self.pad_image( image=image, size=size, random_padding=random_padding, input_data_format=input_data_format ) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
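# --- Small usage sketch (dummy input, not part of the library file above) -----
# Running the full preprocessing pipeline (resize -> thumbnail -> pad -> rescale
# -> normalize) on a random RGB image. With the default size
# {"height": 2560, "width": 1920}, padding brings every image to that resolution,
# returned channels-first.
import numpy as np

from transformers import DonutImageProcessor

image_processor = DonutImageProcessor()
dummy_image = np.random.randint(0, 256, size=(600, 400, 3), dtype=np.uint8)  # HWC uint8

batch = image_processor(images=dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 2560, 1920)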
mavonic_private_repos/transformers/src/transformers/models/donut/convert_donut_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def get_configs(model): original_config = model.config encoder_config = DonutSwinConfig( image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, ) decoder_config = MBartConfig( is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len( model.decoder.tokenizer ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json) scale_embedding=True, add_final_layer_norm=True, ) return encoder_config, decoder_config def rename_key(name): if "encoder.model" in name: name = name.replace("encoder.model", "encoder") if "decoder.model" in name: name = name.replace("decoder.model", "decoder") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.norm") if name.startswith("encoder"): if "layers" in name: name = "encoder." 
+ name if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "mask" not in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "encoder.layernorm.weight" if name == "encoder.norm.bias": name = "encoder.layernorm.bias" return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[3]) block_num = int(key_split[5]) dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight" ] = val[dim : dim * 2, :] orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias" ] = val[:dim] orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias" ] = val[dim : dim * 2] orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias" ] = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): # load original model original_model = DonutModel.from_pretrained(model_name).eval() # load HuggingFace model encoder_config, decoder_config = get_configs(original_model) encoder = DonutSwinModel(encoder_config) decoder = MBartForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # verify results on scanned document dataset = load_dataset("hf-internal-testing/example-documents") image = dataset["test"][0]["image"].convert("RGB") tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) image_processor = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] ) processor = DonutProcessor(image_processor, tokenizer) pixel_values = processor(image, return_tensors="pt").pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" question = "When is the coffee break?" 
task_prompt = task_prompt.replace("{user_input}", question) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": task_prompt = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: task_prompt = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": task_prompt = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": task_prompt = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt task_prompt = "hello world" else: raise ValueError("Model name not supported") prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[ "input_ids" ] original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) patch_embeddings, _ = model.encoder.embeddings(pixel_values) assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3) # verify encoder hidden states original_last_hidden_state = original_model.encoder(pixel_values) last_hidden_state = model.encoder(pixel_values).last_hidden_state assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) # verify decoder hidden states original_logits = original_model(pixel_values, prompt_tensors, None).logits logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits assert torch.allclose(original_logits, logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) args = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
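# --- Tiny illustration (a simplified subset, not extra conversion logic) ------
# rename_key above maps parameter names from the original donut-python checkpoint
# onto the HuggingFace module hierarchy via plain string replacement. A few of
# those replacements, applied to one representative key:

def rename_donut_key_subset(name: str) -> str:
    # Subset of the rules in rename_key, reproduced for illustration only.
    for old, new in [
        ("encoder.model", "encoder"),
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("mlp.fc1", "intermediate.dense"),
        ("mlp.fc2", "output.dense"),
    ]:
        name = name.replace(old, new)
    return name


print(rename_donut_key_subset("encoder.model.patch_embed.proj.weight"))
# -> encoder.embeddings.patch_embeddings.projection.weight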
mavonic_private_repos/transformers/src/transformers/models/donut/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"], "processing_donut": ["DonutProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_donut_swin"] = [ "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "DonutSwinModel", "DonutSwinPreTrainedModel", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"] _import_structure["image_processing_donut"] = ["DonutImageProcessor"] if TYPE_CHECKING: from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig from .processing_donut import DonutProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_donut_swin import ( DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, DonutSwinModel, DonutSwinPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_donut import DonutFeatureExtractor from .image_processing_donut import DonutImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mavonic_private_repos/transformers/src/transformers/models/donut/processing_donut.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Donut. """ import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class DonutProcessor(ProcessorMixin): r""" Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single processor. [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and [`~DonutProcessor.decode`] for more information. Args: image_processor ([`DonutImageProcessor`], *optional*): An instance of [`DonutImageProcessor`]. The image processor is a required input. tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*): An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's [`~AutoImageProcessor.__call__`] and returns its output. If used in the context [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's [`~DonutTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop("images", None) text = kwargs.pop("text", None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.image_processor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False def token2json(self, tokens, is_inner_value=False, added_vocab=None): """ Convert a (generated) token sequence into an ordered JSON format. """ if added_vocab is None: added_vocab = self.tokenizer.get_added_vocab() output = {} while tokens: start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE) if start_token is None: break key = start_token.group(1) key_escaped = re.escape(key) end_token = re.search(rf"</s_{key_escaped}>", tokens, re.IGNORECASE) start_token = start_token.group() if end_token is None: tokens = tokens.replace(start_token, "") else: end_token = end_token.group() start_token_escaped = re.escape(start_token) end_token_escaped = re.escape(end_token) content = re.search( f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL ) if content is not None: content = content.group(1).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab) if value: if len(value) == 1: value = value[0] output[key] = value else: # leaf nodes output[key] = [] for leaf in content.split(r"<sep/>"): leaf = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": leaf = leaf[1:-2] # for categorical special tokens output[key].append(leaf) if len(output[key]) == 1: output[key] = output[key][0] tokens = tokens[tokens.find(end_token) + len(end_token) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab) if len(output): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
mavonic_private_repos/transformers/src/transformers/models/donut/feature_extraction_donut.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Donut.""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor logger = logging.get_logger(__name__) class DonutFeatureExtractor(DonutImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DonutImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
mavonic_private_repos/transformers/src/transformers/models/donut/modeling_donut_swin.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Donut Swin Transformer model. This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden states.""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_donut_swin import DonutSwinConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "DonutSwinConfig" # Base docstring _CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base" _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin class DonutSwinEncoderOutput(ModelOutput): """ DonutSwin encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. 
""" last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass # Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin class DonutSwinModelOutput(ModelOutput): """ DonutSwin model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None # Copied from transformers.models.swin.modeling_swin.window_partition def window_partition(input_feature, window_size): """ Partitions the given input into windows. """ batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.window_reverse def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin class DonutSwinEmbeddings(nn.Module): """ Construct the patch and position embeddings. Optionally, also the mask token. 
""" def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = DonutSwinPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None ) -> Tuple[torch.Tensor]: embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings class DonutSwinPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging class DonutSwinPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input to be disible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.swin.modeling_swin.SwinDropPath class DonutSwinDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin class DonutSwinSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) ) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in DonutSwinModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput class DonutSwinSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin class DonutSwinAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size) self.output = DonutSwinSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, 
head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate class DonutSwinIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinOutput class DonutSwinOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin class DonutSwinLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DonutSwinIntermediate(config, dim) self.output = DonutSwinOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(input_resolution) def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for SW-MSA img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], 
head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs # Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin class DonutSwinStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList( [ DonutSwinLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if (i % 2 == 0) else config.window_size // 2, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states 
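# keep the pre-merging activations; DonutSwinEncoder returns them separately when
# `output_hidden_states_before_downsampling=True` is requested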
if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs # Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin class DonutSwinEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_layers = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.layers = nn.ModuleList( [ DonutSwinStage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None, ) for i_layer in range(self.num_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, always_partition: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, DonutSwinEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition, ) else: layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # 
rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return DonutSwinEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin class DonutSwinPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DonutSwinConfig base_model_prefix = "swin" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["DonutSwinStage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SWIN_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SWIN_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DonutImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.", SWIN_START_DOCSTRING, ) class DonutSwinModel(DonutSwinPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token) self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=DonutSwinModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, DonutSwinModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DonutSwinModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/donut/configuration_donut_swin.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Donut Swin Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Donut [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import DonutSwinConfig, DonutSwinModel >>> # Initializing a Donut naver-clova-ix/donut-base style configuration >>> configuration = DonutSwinConfig() >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration >>> model = DonutSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "donut-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
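A small sketch of the derived attributes, based only on the constructor above: `hidden_size` is set to `embed_dim * 2 ** (len(depths) - 1)` for `VisionEncoderDecoderModel` compatibility, and `attribute_map` aliases the common config names onto the Swin-specific ones:

```python
from transformers import DonutSwinConfig

config = DonutSwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2] by default

print(config.hidden_size)          # 768  (= 96 * 2 ** 3, the channel dim after the last stage)
print(config.num_hidden_layers)    # 4    (aliased to config.num_layers == len(config.depths))
print(config.num_attention_heads)  # [3, 6, 12, 24]  (aliased to config.num_heads)
```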
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
# coding=utf-8 # Copyright 2024 The Seamless Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Wav2Vec2-BERT model.""" import math import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_peft_available, logging, ) from .configuration_wav2vec2_bert import Wav2Vec2BertConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "Wav2Vec2BertConfig" # Base docstring _BASE_CHECKPOINT_FOR_DOC = "facebook/w2v-bert-2.0" _PRETRAINED_CHECKPOINT_FOR_DOC = "hf-audio/wav2vec2-bert-CV16-en" _EXPECTED_OUTPUT_SHAPE = [1, 146, 1024] # CTC docstring _CTC_EXPECTED_OUTPUT = "'mr quilter is the apostle of the middle classes and we are glad to welcome his gospel'" _CTC_EXPECTED_LOSS = 17.04 from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2._compute_new_attention_mask def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor): """ Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that stops at the corresponding element in `seq_lens`. Args: hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`): The sequences to mask, where `*` is any number of sequence-specific dimensions including none. seq_lens (`torch.Tensor` of shape `(batch)`: Each element represents the length of the sequence at the same index in `hidden_states` Returns: `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)` """ batch_size, mask_seq_len = hidden_states.shape[:2] indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1) bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len) mask = hidden_states.new_ones((batch_size, mask_seq_len)) mask = mask.masked_fill(bool_mask, 0) return mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). 
Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices def _sample_negative_indices( features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None ): """ Sample `num_negatives` vectors from feature vectors. """ batch_size, sequence_length = features_shape # generate indices of the positive vectors themselves, repeat them `num_negatives` times sequence_length_range = np.arange(sequence_length) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32) mask_time_indices = ( mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool) ) for batch_idx in range(batch_size): high = mask_time_indices[batch_idx].sum() - 1 mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]] feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives)) sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives)) # avoid sampling the same positive vector, but keep the distribution uniform sampled_indices[sampled_indices >= feature_indices] += 1 # remap to actual indices sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices] # correct for batch size sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRotaryPositionalEmbedding with Wav2Vec2Conformer->Wav2Vec2Bert class Wav2Vec2BertRotaryPositionalEmbedding(nn.Module): """Rotary positional embedding Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf """ def __init__(self, config): super().__init__() dim = config.hidden_size // config.num_attention_heads base = config.rotary_embedding_base inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) # Ignore copy self.register_buffer("inv_freq", inv_freq, persistent=False) self.cached_sequence_length = 
None self.cached_rotary_positional_embedding = None def forward(self, hidden_states): sequence_length = hidden_states.shape[1] if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: return self.cached_rotary_positional_embedding self.cached_sequence_length = sequence_length # Embeddings are computed in the dtype of the inv_freq constant time_stamps = torch.arange(sequence_length).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq) embeddings = torch.cat((freqs, freqs), dim=-1) cos_embeddings = embeddings.cos()[:, None, None, :] sin_embeddings = embeddings.sin()[:, None, None, :] # Computed embeddings are cast to the dtype of the hidden state inputs self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings]).type_as(hidden_states) return self.cached_rotary_positional_embedding # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRelPositionalEmbedding with Wav2Vec2Conformer->Wav2Vec2Bert class Wav2Vec2BertRelPositionalEmbedding(nn.Module): """Relative positional encoding module.""" def __init__(self, config): super().__init__() self.max_len = config.max_source_positions self.d_model = config.hidden_size self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, self.max_len)) def extend_pe(self, x): # Reset the positional encodings if self.pe is not None: # self.pe contains both positive and negative parts # the length of self.pe is 2 * input_len - 1 if self.pe.size(1) >= x.size(1) * 2 - 1: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return # Suppose `i` is the position of query vector and `j` is the # position of key vector. We use positive relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i<j). pe_positive = torch.zeros(x.size(1), self.d_model) pe_negative = torch.zeros(x.size(1), self.d_model) position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model) ) pe_positive[:, 0::2] = torch.sin(position * div_term) pe_positive[:, 1::2] = torch.cos(position * div_term) pe_negative[:, 0::2] = torch.sin(-1 * position * div_term) pe_negative[:, 1::2] = torch.cos(-1 * position * div_term) # Reverse the order of positive indices and concat both positive and # negative indices. 
This is used to support the shifting trick # as in https://arxiv.org/abs/1901.02860 pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0) pe_negative = pe_negative[1:].unsqueeze(0) pe = torch.cat([pe_positive, pe_negative], dim=1) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, hidden_states: torch.Tensor): self.extend_pe(hidden_states) start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1 end_idx = self.pe.size(1) // 2 + hidden_states.size(1) relative_position_embeddings = self.pe[:, start_idx:end_idx] return relative_position_embeddings class Wav2Vec2BertFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class Wav2Vec2BertFeedForward(nn.Module): def __init__(self, config, act_fn=None, hidden_size=None): super().__init__() act_fn = act_fn if act_fn is not None else config.hidden_act hidden_size = hidden_size if hidden_size is not None else config.hidden_size self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn self.output_dense = nn.Linear(config.intermediate_size, hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward.forward def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class Wav2Vec2BertConvolutionModule(nn.Module): """Convolution block used in the conformer block""" def __init__(self, config): super().__init__() if (config.conv_depthwise_kernel_size - 1) % 2 == 1: raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding") self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pointwise_conv1 = nn.Conv1d( config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.glu = nn.GLU(dim=1) self.depthwise_conv = nn.Conv1d( config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding=0, groups=config.hidden_size, bias=False, ) self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.activation = ACT2FN[config.hidden_act] self.pointwise_conv2 = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.dropout = nn.Dropout(config.conformer_conv_dropout) def forward(self, hidden_states, attention_mask=None): hidden_states = self.layer_norm(hidden_states) # Ensure that we do not leak padded positions in depthwise convolution if attention mask is passed. 
# Put 0 where necessary if attention_mask is not None: hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # exchange the temporal dimension and the feature dimension hidden_states = hidden_states.transpose(1, 2) # GLU mechanism # => (batch, 2*channel, dim) hidden_states = self.pointwise_conv1(hidden_states) # => (batch, channel, dim) hidden_states = self.glu(hidden_states) # Pad the sequence entirely on the left because of causal convolution. hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0)) # 1D Depthwise Conv hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2) hidden_states = self.activation(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class Wav2Vec2BertSelfAttention(nn.Module): """Construct an Wav2Vec2BertSelfAttention object. Can be enhanced with rotary or relative position embeddings. """ def __init__(self, config, is_adapter_attention=False): super().__init__() hidden_size = config.hidden_size if not is_adapter_attention else config.output_hidden_size self.head_size = hidden_size // config.num_attention_heads self.num_heads = config.num_attention_heads self.position_embeddings_type = config.position_embeddings_type if not is_adapter_attention else None self.linear_q = nn.Linear(hidden_size, hidden_size) self.linear_k = nn.Linear(hidden_size, hidden_size) self.linear_v = nn.Linear(hidden_size, hidden_size) self.linear_out = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(p=config.attention_dropout) if self.position_embeddings_type == "relative": # linear transformation for positional encoding self.linear_pos = nn.Linear(hidden_size, hidden_size, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) if self.position_embeddings_type == "relative_key": self.left_max_position_embeddings = config.left_max_position_embeddings self.right_max_position_embeddings = config.right_max_position_embeddings num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1 self.distance_embedding = nn.Embedding(num_positions, self.head_size) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # self-attention mechanism batch_size, sequence_length, hidden_size = hidden_states.size() # make sure query/key states can be != value states query_key_states = hidden_states value_states = hidden_states if self.position_embeddings_type == "rotary": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'" ) query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings) # project query_key_states and value_states query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) value 
= self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) # => (batch, head, time1, d_k) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) if self.position_embeddings_type == "relative": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type ==" " 'relative'" ) # apply relative_position_embeddings to qk scores # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860 scores = self._apply_relative_embeddings( query=query, key=key, relative_position_embeddings=relative_position_embeddings ) else: scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) if self.position_embeddings_type == "relative_key": query_length, key_length = query.shape[2], key.shape[2] position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_r - position_ids_l distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings) positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings) positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding) scores = scores + (relative_position_attn_weights / math.sqrt(self.head_size)) # apply attention_mask if necessary if attention_mask is not None: scores = scores + attention_mask # => (batch, head, time1, time2) probs = torch.softmax(scores, dim=-1) probs = self.dropout(probs) # => (batch, head, time1, d_k) hidden_states = torch.matmul(probs, value) # => (batch, time1, hidden_size) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) hidden_states = self.linear_out(hidden_states) return hidden_states, probs # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_rotary_embedding def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings): batch_size, sequence_length, hidden_size = hidden_states.size() hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size) cos = relative_position_embeddings[0, :sequence_length, ...] sin = relative_position_embeddings[1, :sequence_length, ...] # rotate hidden_states with rotary embeddings hidden_states = hidden_states.transpose(0, 1) rotated_states_begin = hidden_states[..., : self.head_size // 2] rotated_states_end = hidden_states[..., self.head_size // 2 :] rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1) hidden_states = (hidden_states * cos) + (rotated_states * sin) hidden_states = hidden_states.transpose(0, 1) hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size) return hidden_states # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_relative_embeddings def _apply_relative_embeddings(self, query, key, relative_position_embeddings): # 1. 
project positional embeddings # => (batch, head, 2*time1-1, d_k) proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings) proj_relative_position_embeddings = proj_relative_position_embeddings.view( relative_position_embeddings.size(0), -1, self.num_heads, self.head_size ) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2) proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3) # 2. Add bias to query # => (batch, head, time1, d_k) query = query.transpose(1, 2) q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2) q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2) # 3. attention score: first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # => (batch, head, time1, time2) scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1)) # 4. then compute matrix b and matrix d # => (batch, head, time1, 2*time1-1) scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings) # 5. shift matrix b and matrix d zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype) scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1) scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2]) scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape) scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd) scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1] # 6. sum matrices # => (batch, head, time1, time2) scores = (scores_ac + scores_bd) / math.sqrt(self.head_size) return scores class Wav2Vec2BertEncoderLayer(nn.Module): """Conformer block based on https://arxiv.org/abs/2005.08100.""" def __init__(self, config): super().__init__() embed_dim = config.hidden_size dropout = config.attention_dropout # Feed-forward 1 self.ffn1_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn1 = Wav2Vec2BertFeedForward(config) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_dropout = nn.Dropout(dropout) self.self_attn = Wav2Vec2BertSelfAttention(config) # Conformer Convolution self.conv_module = Wav2Vec2BertConvolutionModule(config) # Feed-forward 2 self.ffn2_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn2 = Wav2Vec2BertFeedForward(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, conv_attention_mask: Optional[torch.Tensor] = None, ): hidden_states = hidden_states # 1. Feed-Forward 1 layer residual = hidden_states hidden_states = self.ffn1_layer_norm(hidden_states) hidden_states = self.ffn1(hidden_states) hidden_states = hidden_states * 0.5 + residual residual = hidden_states # 2. Self-Attention layer hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weigts = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual # 3. Convolutional Layer residual = hidden_states hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask) hidden_states = residual + hidden_states # 4. 
Feed-Forward 2 Layer residual = hidden_states hidden_states = self.ffn2_layer_norm(hidden_states) hidden_states = self.ffn2(hidden_states) hidden_states = hidden_states * 0.5 + residual hidden_states = self.final_layer_norm(hidden_states) return hidden_states, attn_weigts class Wav2Vec2BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.position_embeddings_type == "relative": self.embed_positions = Wav2Vec2BertRelPositionalEmbedding(config) elif config.position_embeddings_type == "rotary": self.embed_positions = Wav2Vec2BertRotaryPositionalEmbedding(config) else: self.embed_positions = None self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([Wav2Vec2BertEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None conv_attention_mask = attention_mask if attention_mask is not None: # make sure padded tokens output 0 hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) hidden_states = self.dropout(hidden_states) if self.embed_positions is not None: relative_position_embeddings = self.embed_positions(hidden_states) else: relative_position_embeddings = None deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, relative_position_embeddings, output_attentions, conv_attention_mask, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Wav2Vec2BertAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size, eps=config.layer_norm_eps) else: self.proj = 
self.proj_layer_norm = None self.layers = nn.ModuleList(Wav2Vec2BertAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride def _compute_sub_sample_lengths_from_attention_mask(self, seq_lens): if seq_lens is None: return seq_lens pad = self.kernel_size // 2 seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1 return seq_lens.floor() def forward(self, hidden_states, attention_mask=None): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) sub_sampled_lengths = None if attention_mask is not None: sub_sampled_lengths = (attention_mask.size(1) - (1 - attention_mask.int()).sum(1)).to(hidden_states.device) for layer in self.layers: layerdrop_prob = torch.rand([]) sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(sub_sampled_lengths) if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer( hidden_states, attention_mask=attention_mask, sub_sampled_lengths=sub_sampled_lengths ) return hidden_states class Wav2Vec2BertAdapterLayer(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.output_hidden_size dropout = config.conformer_conv_dropout self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride # 1. residual convolution self.residual_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.residual_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.activation = nn.GLU(dim=1) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.self_attn = Wav2Vec2BertSelfAttention(config, is_adapter_attention=True) self.self_attn_dropout = nn.Dropout(dropout) # Feed-forward self.ffn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn = Wav2Vec2BertFeedForward(config, act_fn=config.adapter_act, hidden_size=embed_dim) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, sub_sampled_lengths: Optional[torch.Tensor] = None, ): residual = self.residual_layer_norm(hidden_states) # Apply pooling to the residual to match the sequence length of the # multi-head attention output. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) residual = residual.transpose(1, 2) residual = self.residual_conv(residual) residual = self.activation(residual) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) residual = residual.transpose(1, 2) hidden_states = self.self_attn_layer_norm(hidden_states) # Apply pooling before feeding to the multihead-attention layer. 
# (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.self_attn_conv(hidden_states) hidden_states = self.activation(hidden_states) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) hidden_states = hidden_states.transpose(1, 2) if attention_mask is not None: attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths) attention_mask = _prepare_4d_attention_mask( attention_mask, hidden_states.dtype, ) # The rest of the computation is identical to a vanilla Transformer # encoder layer. hidden_states, attn_weigths = self.self_attn( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) + residual return hidden_states # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerPreTrainedModel with Wav2Vec2Conformer->Wav2Vec2Bert,wav2vec2_conformer->wav2vec2_bert, input_values->input_features class Wav2Vec2BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2BertConfig base_model_prefix = "wav2vec2_bert" main_input_name = "input_features" supports_gradient_checkpointing = True # Ignore copy def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2BertSelfAttention): if hasattr(module, "pos_bias_u"): nn.init.xavier_uniform_(module.pos_bias_u) if hasattr(module, "pos_bias_v"): nn.init.xavier_uniform_(module.pos_bias_v) elif isinstance(module, Wav2Vec2BertFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) # Ignore copy def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride, padding): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1 if add_adapter: padding = self.config.adapter_kernel_size // 2 for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length( input_lengths, self.config.adapter_kernel_size, self.config.adapter_stride, padding ) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference 
mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask WAV2VEC2_BERT_START_DOCSTRING = r""" Wav2Vec2Bert was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Wav2Vec2BertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAV2VEC2_BERT_INPUTS_DOCSTRING = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2BertProcessor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top.", WAV2VEC2_BERT_START_DOCSTRING, ) class Wav2Vec2BertModel(Wav2Vec2BertPreTrainedModel): def __init__(self, config: Wav2Vec2BertConfig): super().__init__(config) self.config = config self.feature_projection = Wav2Vec2BertFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.encoder = Wav2Vec2BertEncoder(config) self.adapter = Wav2Vec2BertAdapter(config) if config.add_adapter else None self.intermediate_ffn = None if config.use_intermediate_ffn_before_adapter: self.intermediate_ffn = Wav2Vec2BertFeedForward(config, act_fn="relu") # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). """ # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states, extract_features = self.feature_projection(input_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.intermediate_ffn: expanded_hidden_states = self.intermediate_ffn(hidden_states) hidden_states = hidden_states + 0.5 * expanded_hidden_states if self.adapter is not None: hidden_states = self.adapter(hidden_states, attention_mask=attention_mask) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """Wav2Vec2Bert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", WAV2VEC2_BERT_START_DOCSTRING, ) class Wav2Vec2BertForCTC(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForCTC.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.wav2vec2_bert = Wav2Vec2BertModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `Wav2Vec2BertForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones(input_features.shape[:2], device=input_features.device, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum([-1])).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, WAV2VEC2_BERT_START_DOCSTRING, ) class Wav2Vec2BertForSequenceClassification(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.__init__ with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)" ) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.wav2vec2_bert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert,WAV_2_VEC_2->WAV2VEC2_BERT, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Wav2Vec2Bert Model with a frame classification head on top for tasks like Speaker Diarization. 
""", WAV2VEC2_BERT_START_DOCSTRING, ) class Wav2Vec2BertForAudioFrameClassification(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)" ) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.freeze_base_model with wav2vec2_conformer->wav2vec2_bert def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if is_peft_available(): from peft.tuners.lora import LoraLayer if isinstance(self.kernel, LoraLayer): warnings.warn( "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. " "You should exclude TDNNLayer from LoRA's target modules.", ) # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up hidden_states = hidden_states.transpose(1, 2) weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2) hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ Wav2Vec2Bert Model with an XVector feature extraction head on top for tasks like Speaker Verification. 
""", WAV2VEC2_BERT_START_DOCSTRING, ) class Wav2Vec2BertForXVector(Wav2Vec2BertPreTrainedModel): # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert def __init__(self, config): super().__init__(config) self.wav2vec2_bert = Wav2Vec2BertModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.freeze_base_model with wav2vec2_conformer->wav2vec2_bert def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector._get_tdnn_output_lengths def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_BASE_CHECKPOINT_FOR_DOC, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
# coding=utf-8 # Copyright 2024 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Wav2Vec2Bert model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class Wav2Vec2BertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2BertModel`]. It is used to instantiate an Wav2Vec2Bert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Bert [facebook/wav2vec2-bert-rel-pos-large](https://huggingface.co/facebook/wav2vec2-bert-rel-pos-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*): Vocabulary size of the Wav2Vec2Bert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Wav2Vec2BertModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`Wav2Vec2BertModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. feature_projection_input_dim (`int`, *optional*, defaults to 160): Input dimension of this model, i.e the dimension after processing input audios with [`SeamlessM4TFeatureExtractor`] or [`Wav2Vec2BertProcessor`]. hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the feature projection. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`Wav2Vec2BertForCTC`]. 
layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procecure generates `mask_time_prob*len(time_axis)/mask_time_length ``independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`. mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procecure generates `mask_feature_prob*len(feature_axis)/mask_time_length` independent masks over the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`Wav2Vec2BertForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 768): Dimensionality of the projection before token mean-pooling for classification. 
tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the _beginning-of-stream_ token. bos_token_id (`int`, *optional*, defaults to 1): The id of the _padding_ token. eos_token_id (`int`, *optional*, defaults to 2): The id of the _end-of-stream_ token. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional attention network should be stacked on top of the Wav2Vec2Bert Encoder. Can be very useful for warm-starting Wav2Vec2Bert for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 1): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. adapter_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the adapter layers. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. use_intermediate_ffn_before_adapter (`bool`, *optional*, defaults to `False`): Whether an intermediate feed-forward block should be stacked on top of the Wav2Vec2Bert Encoder and before the adapter network. Only relevant if `add_adapter is True`. output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant if `add_adapter is True`. position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`): Can be specified to : - `rotary`, for rotary position embeddings. - `relative`, for relative position embeddings. - `relative_key`, for relative position embeddings as defined by Shaw in [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). If left to `None`, no relative position embeddings is applied. rotary_embedding_base (`int`, *optional*, defaults to 10000): If `"rotary"` position embeddings are used, defines the size of the embedding base. max_source_positions (`int`, *optional*, defaults to 5000): if `"relative"` position embeddings are used, defines the maximum source input positions. left_max_position_embeddings (`int`, *optional*, defaults to 64): If `"relative_key"` (aka Shaw) position embeddings are used, defines the left clipping value for relative positions. 
right_max_position_embeddings (`int`, *optional*, defaults to 8): If `"relative_key"` (aka Shaw) position embeddings are used, defines the right clipping value for relative positions. conv_depthwise_kernel_size (`int`, *optional*, defaults to 31): Kernel size of convolutional depthwise 1D layer in Conformer blocks. conformer_conv_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all convolutional layers in Conformer blocks. Example: ```python >>> from transformers import Wav2Vec2BertConfig, Wav2Vec2BertModel >>> # Initializing a Wav2Vec2Bert facebook/wav2vec2-bert-rel-pos-large style configuration >>> configuration = Wav2Vec2BertConfig() >>> # Initializing a model (with random weights) from the facebook/wav2vec2-bert-rel-pos-large style configuration >>> model = Wav2Vec2BertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wav2vec2-bert" def __init__( self, vocab_size=None, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, feature_projection_input_dim=160, hidden_act="swish", hidden_dropout=0.0, activation_dropout=0.0, attention_dropout=0.0, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=768, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=1, adapter_act="relu", use_intermediate_ffn_before_adapter=False, output_hidden_size=None, position_embeddings_type="relative_key", rotary_embedding_base=10000, max_source_positions=5000, left_max_position_embeddings=64, right_max_position_embeddings=8, conv_depthwise_kernel_size=31, conformer_conv_dropout=0.1, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.feature_projection_input_dim = feature_projection_input_dim self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.use_weighted_layer_sum = use_weighted_layer_sum self.max_source_positions = max_source_positions if position_embeddings_type is not None and position_embeddings_type not in [ "rotary", "relative", "relative_key", ]: raise ValueError( """ `position_embeddings_type` is not valid. It must be one of the following values: `["rotary", "relative", "relative_key"]` or left as `None`. 
""" ) self.position_embeddings_type = position_embeddings_type self.rotary_embedding_base = rotary_embedding_base self.left_max_position_embeddings = left_max_position_embeddings self.right_max_position_embeddings = right_max_position_embeddings # Conformer-block related self.conv_depthwise_kernel_size = conv_depthwise_kernel_size self.conformer_conv_dropout = conformer_conv_dropout # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.adapter_act = adapter_act self.output_hidden_size = output_hidden_size if output_hidden_size is not None else hidden_size if use_intermediate_ffn_before_adapter and not add_adapter: raise ValueError("`use_intermediate_ffn_before_adapter` is `True` but `add_adapter` is `False`.") self.use_intermediate_ffn_before_adapter = use_intermediate_ffn_before_adapter # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): ratio = self.feature_projection_input_dim * 2 if self.add_adapter: ratio = ratio * (self.adapter_stride**self.num_adapter_layers) return ratio
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2Bert checkpoint.""" import argparse import torch import torchaudio from fairseq2.data import Collater from fairseq2.data.audio import WaveformToFbankConverter from fairseq2.nn.padding import get_seqs_and_padding_mask from seamless_communication.models.conformer_shaw import load_conformer_shaw_model from transformers import ( SeamlessM4TFeatureExtractor, Wav2Vec2BertConfig, Wav2Vec2BertModel, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) wav2vec_convert_list = [ ("encoder_frontend.model_dim_proj", "feature_projection.projection"), ("encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"), ("encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"), ("encoder.inner.layers", "encoder.layers"), ("encoder.inner_layer_norm", "encoder.layer_norm"), ("encoder.adaptor_layers", "adapter.layers"), ("inner_proj", "intermediate_dense"), ("self_attn.output_proj", "self_attn.linear_out"), ("output_proj", "output_dense"), ("self_attn.k_proj", "self_attn.linear_k"), ("self_attn.v_proj", "self_attn.linear_v"), ("self_attn.q_proj", "self_attn.linear_q"), ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"), ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"), ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"), ("self_attn.sdpa.r_proj", "self_attn.linear_pos"), ("conv.pointwise_conv1", "conv_module.pointwise_conv1"), ("conv.pointwise_conv2", "conv_module.pointwise_conv2"), ("conv.depthwise_conv", "conv_module.depthwise_conv"), ("conv.layer_norm", "conv_module.depthwise_layer_norm"), ("conv_layer_norm", "conv_module.layer_norm"), ("encoder.proj1", "intermediate_ffn.intermediate_dense"), ("encoder.proj2", "intermediate_ffn.output_dense"), ("encoder.layer_norm", "inner_layer_norm"), ("masker.temporal_mask_embed", "masked_spec_embed"), ] keys_to_remove = { "quantizer.entry_proj", "final_proj", "final_target_proj", "quantizer.entries", "quantizer.num_updates", } def param_count(model): return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0]) def _convert_model( original_model, hf_model, convert_list, ): state_dict = original_model.state_dict() for k, v in list(state_dict.items()): new_key = k for old_layer_name, new_layer_name in convert_list: if old_layer_name in new_key: new_key = new_key.replace(old_layer_name, new_layer_name) # must do it by hand if ".layer_norm" in new_key and new_key.split(".layer_norm")[0][-1].isnumeric(): new_key = new_key.replace("layer_norm", "final_layer_norm") add_key = True for key in keys_to_remove: if key in new_key: state_dict.pop(k) add_key = False break if add_key: state_dict[new_key] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys()) extra_keys = set({k for k in extra_keys if "num_updates" not in k}) # filter unnecessary param missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys()) if len(extra_keys) != 0: raise 
ValueError(f"extra keys found: {extra_keys}") if len(missing_keys) != 0: raise ValueError(f"missing keys: {missing_keys}") hf_model.load_state_dict(state_dict, strict=True) n_params = param_count(hf_model) logger.info(f"model loaded: {round(n_params/1e6,1)}M params") hf_model.eval() del state_dict return hf_model @torch.no_grad() def convert_wav2vec2_bert_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ): """ Copy/paste/tweak model's weights to transformers design. """ if config_path is not None: config = Wav2Vec2BertConfig.from_pretrained(config_path, hidden_act="swish") else: config = Wav2Vec2BertConfig(apply_spec_augment=False) hf_wav2vec = Wav2Vec2BertModel(config) model = load_conformer_shaw_model(checkpoint_path, dtype=torch.float32) model.eval() hf_wav2vec = _convert_model(model, hf_wav2vec, wav2vec_convert_list) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) if repo_id: hf_wav2vec.push_to_hub(repo_id, create_pr=True) # save feature extractor fe = SeamlessM4TFeatureExtractor(padding_value=1) fe._set_processor_class("Wav2Vec2BertProcessor") fe.save_pretrained(pytorch_dump_folder_path) if repo_id: fe.push_to_hub(repo_id, create_pr=True) if args.audio_path: waveform, sample_rate = torchaudio.load(args.audio_path) waveform = torchaudio.functional.resample(waveform, sample_rate, fe.sampling_rate) fbank_converter = WaveformToFbankConverter( num_mel_bins=80, waveform_scale=2**15, channel_last=True, standardize=True, dtype=torch.float32, ) collater = Collater(pad_value=1) decoded_audio = {"waveform": waveform.T, "sample_rate": fe.sampling_rate, "format": -1} src = collater(fbank_converter(decoded_audio))["fbank"] seqs, padding_mask = get_seqs_and_padding_mask(src) with torch.inference_mode(): seqs, padding_mask = model.encoder_frontend(seqs, padding_mask) original_output, padding_mask = model.encoder(seqs, padding_mask) hf_wav2vec.eval() inputs = fe(waveform, return_tensors="pt", padding=True) with torch.no_grad(): outputs = hf_wav2vec(**inputs) torch.testing.assert_close(original_output, outputs.last_hidden_state, atol=5e-3, rtol=5e-3) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.", ) parser.add_argument( "--checkpoint_path", default="conformer_shaw", type=str, help="Path to seamless communication checkpoint" ) parser.add_argument( "--config_path", default=None, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--repo_id", default=None, type=str, help="Push to this repo id if precised.") parser.add_argument( "--audio_path", default=None, type=str, help="If specified, check that the original model and the converted model produce the same outputs.", ) args = parser.parse_args() convert_wav2vec2_bert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.repo_id )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Wav2Vec2-BERT """ import warnings from ...processing_utils import ProcessorMixin from ..seamless_m4t.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer class Wav2Vec2BertProcessor(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2BertProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2BertProcessor.__call__`] and [`~Wav2Vec2BertProcessor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): try: return super().from_pretrained(pretrained_model_name_or_path, **kwargs) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", FutureWarning, ) feature_extractor = SeamlessM4TFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__(self, audio=None, text=None, **kwargs): """ Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequence(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. 
Each audio can be a NumPy array or a PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels, and T the sample length of the audio. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model when `audio` is not `None`. When only `text` is specified, returns the token attention mask. - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not `None`. - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio` is `None`. """ sampling_rate = kwargs.pop("sampling_rate", None) if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, input_features=None, labels=None, **kwargs): """ If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. If `labels` is not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the docstring of the above two methods for more information. """ if input_features is None and labels is None: raise ValueError("You need to specify either an `input_features` or `labels` input to pad.") if input_features is not None: input_features = self.feature_extractor.pad(input_features, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
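A short usage sketch of the `__call__` contract documented above (audio produces `input_features`, text produces `labels`). The checkpoint id `"hf-org/w2v-bert-ctc-demo"` is a placeholder assumption, standing in for any repo that ships both a `SeamlessM4TFeatureExtractor` config and a CTC tokenizer vocab; the random array stands in for 16 kHz audio.

```python
import numpy as np
from transformers import Wav2Vec2BertProcessor

# Placeholder checkpoint id (assumed to contain feature extractor + tokenizer files).
processor = Wav2Vec2BertProcessor.from_pretrained("hf-org/w2v-bert-ctc-demo")

audio = np.random.randn(16000).astype(np.float32)  # 1 second of fake 16 kHz audio
batch = processor(audio=audio, text="hello world", sampling_rate=16000, return_tensors="pt")

# Per the docstring above: audio -> input_features, text -> labels.
print(batch["input_features"].shape)
print(batch["labels"])
```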
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/wav2vec2_bert/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_wav2vec2_bert": [ "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2BertConfig", ], "processing_wav2vec2_bert": ["Wav2Vec2BertProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_wav2vec2_bert"] = [ "WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2BertForAudioFrameClassification", "Wav2Vec2BertForCTC", "Wav2Vec2BertForSequenceClassification", "Wav2Vec2BertForXVector", "Wav2Vec2BertModel", "Wav2Vec2BertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wav2vec2_bert import ( WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2BertConfig, ) from .processing_wav2vec2_bert import Wav2Vec2BertProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wav2vec2_bert import ( WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForXVector, Wav2Vec2BertModel, Wav2Vec2BertPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This script can be used to convert checkpoints provided in the `mamba_ssm` library into the format provided in HuggingFace `transformers`. It depends on the `mamba_ssm` package to be installed.""" import argparse import json import math from typing import Tuple import torch from transformers import AutoTokenizer, MambaConfig, MambaForCausalLM from transformers.utils import logging from transformers.utils.import_utils import is_mamba_ssm_available if is_mamba_ssm_available(): from mamba_ssm.models.config_mamba import MambaConfig as MambaConfigSSM from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel def convert_ssm_config_to_hf_config(config_ssm: MambaConfigSSM) -> MambaConfig: """Convert a MambaConfig from mamba_ssm to a MambaConfig from transformers.""" hf_config = MambaConfig() # Set config hidden size, num hidden layers, and vocab size directly from the original config hf_config.hidden_size = config_ssm.d_model hf_config.intermediate_size = config_ssm.d_model * 2 hf_config.time_step_rank = math.ceil(config_ssm.d_model / 16) hf_config.num_hidden_layers = config_ssm.n_layer vocab_size = config_ssm.vocab_size pad_vocab_size_multiple = config_ssm.pad_vocab_size_multiple if (vocab_size % pad_vocab_size_multiple) != 0: vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) hf_config.vocab_size = vocab_size return hf_config logging.set_verbosity_info() logger = logging.get_logger(__name__) def convert_mamba_ssm_checkpoint_to_huggingface_model( original_state_dict: dict, original_ssm_config_dict: dict ) -> Tuple[MambaForCausalLM, AutoTokenizer]: if not is_mamba_ssm_available(): raise ImportError( "Calling convert_mamba_ssm_checkpoint_to_huggingface_model requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`." ) original_ssm_config = MambaConfigSSM(**original_ssm_config_dict) # Convert mamba_ssm config to huggingface MambaConfig hf_config = convert_ssm_config_to_hf_config(original_ssm_config) # No weights need to be renamed between the two models. converted_state_dict = original_state_dict # Load reshaped state dict into a huggingface model. 
hf_model = MambaForCausalLM(hf_config) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") hf_model.load_state_dict(converted_state_dict) return (hf_model, tokenizer) def validate_converted_model( original_state_dict: dict, original_ssm_config_dict: dict, hf_model: MambaForCausalLM, tokenizer: AutoTokenizer ) -> None: """Validate the converted model returns the same output as the original model.""" torch_device = "cuda" original_config = MambaConfigSSM(**original_ssm_config_dict) original_model = MambaLMHeadModel(original_config).to(torch_device) original_model.load_state_dict(original_state_dict) hf_model = hf_model.to(torch_device) input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"].to(torch_device) # Assert model logits are close with torch.no_grad(): original_model_logits = original_model(input_ids).logits hf_model_logits = hf_model(input_ids).logits if not torch.allclose(original_model_logits, hf_model_logits, atol=1e-3): raise ValueError("The converted model did not return the same logits as the original model.") logger.info("Model conversion validated successfully.") def convert_mamba_checkpoint_file_to_huggingface_model_file( mamba_checkpoint_path: str, config_json_file: str, output_dir: str ) -> None: if not is_mamba_ssm_available(): raise ImportError( "Calling convert_mamba_checkpoint_file_to_huggingface_model_file requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`." ) if not torch.cuda.is_available(): raise ValueError( "This script is to be run with a CUDA device, as the original mamba_ssm model does not support cpu." ) logger.info(f"Loading model from {mamba_checkpoint_path} based on config from {config_json_file}") # Load weights and config from paths original_state_dict = torch.load(mamba_checkpoint_path, map_location="cpu") with open(config_json_file, "r", encoding="utf-8") as json_file: original_ssm_config_dict = json.load(json_file) # Convert the model hf_model, tokenizer = convert_mamba_ssm_checkpoint_to_huggingface_model( original_state_dict, original_ssm_config_dict ) # Validate the conversion validate_converted_model(original_state_dict, original_ssm_config_dict, hf_model, tokenizer) logger.info(f"Model converted successfully. Saving model to {output_dir}") # Save new model to pytorch_dump_path hf_model.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-i", "--mamba_checkpoint_file", type=str, required=True, help="Path to a `pytorch_model.bin` mamba_ssm checkpoint file to be converted.", ) parser.add_argument( "-c", "--config_json_file", type=str, required=True, help="Path to a `config.json` file corresponding to a MambaConfig of the original mamba_ssm model.", ) parser.add_argument( "-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to." ) args = parser.parse_args() convert_mamba_checkpoint_file_to_huggingface_model_file( args.mamba_checkpoint_file, args.config_json_file, args.output_dir )
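The only non-obvious arithmetic in `convert_ssm_config_to_hf_config` above is rounding the vocabulary size up to a multiple of `pad_vocab_size_multiple` so the embedding matrix matches the checkpoint. A standalone sketch of that rule follows; the 50,277/8 pair is illustrative, chosen because it resolves to 50,280, the `vocab_size` default in `MambaConfig`.

```python
def pad_vocab_size(vocab_size: int, pad_vocab_size_multiple: int) -> int:
    # Same rule as in convert_ssm_config_to_hf_config: round up to the next multiple.
    if vocab_size % pad_vocab_size_multiple != 0:
        vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
    return vocab_size

print(pad_vocab_size(50277, 8))  # 50280
```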
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mamba/configuration_mamba.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MAMBA configuration""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class MambaConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MAMBA [state-spaces/mamba-2.8b](https://huggingface.co/state-spaces/mamba-2.8b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50280): Vocabulary size of the MAMBA model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`MambaModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the embeddings and hidden states. state_size (`int`, *optional*, defaults to 16): Shape of the state space latents. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the model. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 0): The id of the beginning of sentence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 0): The id of the end of sentence token in the vocabulary. expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size. conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel. use_bias (`bool`, *optional*, defaults to `False`): Whether or not to use bias in ["in_proj", "out_proj"] of the mixer block. use_conv_bias (`bool`, *optional*, defaults to `True`): Whether or not to use bias in the convolution layer of the mixer block. hidden_act (`str`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. initializer_range (`float`, *optional*, defaults to 0.1): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. residual_in_fp32 (`bool`, *optional*, defaults to `True`): Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model. time_step_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): Rank of the discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` time_step_scale (`float`, *optional*, defaults to 1.0): Scale used to scale `dt_proj.bias`. 
time_step_min (`float`, *optional*, defaults to 0.001): Minimum `time_step` used to bound `dt_proj.bias`. time_step_max (`float`, *optional*, defaults to 0.1): Maximum `time_step` used to bound `dt_proj.bias`. time_step_init_scheme (`str`, *optional*, defaults to `"random"`): Init scheme used for `dt_proj.weight`. Should be one of `["random","uniform"]` time_step_floor (`float`, *optional*, defaults to 0.0001): Minimum clamping value of the `dt_proj.bias` layer initialization. rescale_prenorm_residual (`bool`, *optional*, defaults to `False`): Whether or not to rescale `out_proj` weights when initializing. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the cache should be used. Example: ```python >>> from transformers import MambaConfig, MambaModel >>> # Initializing a Mamba configuration >>> configuration = MambaConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MambaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mamba" def __init__( self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-5, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act="silu", initializer_range=0.1, residual_in_fp32=True, time_step_rank="auto", time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme="random", time_step_floor=1e-4, rescale_prenorm_residual=False, use_cache=True, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.state_size = state_size self.num_hidden_layers = num_hidden_layers self.layer_norm_epsilon = layer_norm_epsilon self.conv_kernel = conv_kernel self.expand = expand self.intermediate_size = int(expand * self.hidden_size) self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.use_bias = use_bias self.use_conv_bias = use_conv_bias self.hidden_act = hidden_act self.initializer_range = initializer_range self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank self.time_step_scale = time_step_scale self.time_step_min = time_step_min self.time_step_max = time_step_max self.time_step_init_scheme = time_step_init_scheme self.time_step_floor = time_step_floor self.rescale_prenorm_residual = rescale_prenorm_residual self.residual_in_fp32 = residual_in_fp32 self.use_cache = use_cache super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
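Two attributes in the `__init__` above are derived rather than stored verbatim: `intermediate_size` comes from `expand * hidden_size`, and `time_step_rank="auto"` resolves to `ceil(hidden_size / 16)`. A minimal sketch of what the defaults resolve to, using only the class defined above:

```python
import math
from transformers import MambaConfig

config = MambaConfig()  # defaults: hidden_size=768, expand=2, time_step_rank="auto"

# intermediate_size = int(expand * hidden_size) = 2 * 768
print(config.intermediate_size)  # 1536

# "auto" resolves to ceil(hidden_size / 16)
print(config.time_step_rank, math.ceil(768 / 16))  # 48 48
```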
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mamba/__init__.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_mamba": ["MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MambaConfig", "MambaOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mamba"] = [ "MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST", "MambaForCausalLM", "MambaModel", "MambaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mamba import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP, MambaConfig, MambaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mamba import ( MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST, MambaForCausalLM, MambaModel, MambaPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
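The `_LazyModule` registration above defers the torch-dependent imports until a symbol is first accessed. A rough sketch of the observable effect, assuming `transformers` and `torch` are installed; the submodule names checked here are internal details and may differ between library versions.

```python
import sys
from transformers.models import mamba

# The package object is replaced by a _LazyModule at import time, so the heavy
# modeling file is typically not imported yet at this point.
print("transformers.models.mamba.modeling_mamba" in sys.modules)  # expected: False

_ = mamba.MambaForCausalLM  # first attribute access triggers the real import
print("transformers.models.mamba.modeling_mamba" in sys.modules)  # expected: True
```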
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mamba/modeling_mamba.py
# coding=utf-8 # Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MAMBA model.""" import math from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available from .configuration_mamba import MambaConfig logger = logging.get_logger(__name__) if is_mamba_ssm_available(): from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn from mamba_ssm.ops.triton.selective_state_update import selective_state_update else: selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None is_fast_path_available = all( (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn) ) _CHECKPOINT_FOR_DOC = "state-spaces/mamba-130m-hf" _CONFIG_FOR_DOC = "MambaConfig" from ..deprecated._archive_maps import MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 class MambaCache: """ Arguments: config: MambaConfig batch_size: int dtype: torch.dtype device: torch.device Attributes: seqlen_offset: int dtype: torch.dtype conv_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, conv_kernel_size] ssm_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, ssm_state_size] """ def __init__( self, config: MambaConfig, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None ): self.seqlen_offset = 0 self.dtype = dtype intermediate_size = config.intermediate_size ssm_state_size = config.state_size conv_kernel_size = config.conv_kernel self.conv_states = { i: torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype) for i in range(config.num_hidden_layers) } self.ssm_states = { i: torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype) for i in range(config.num_hidden_layers) } class MambaMixer(nn.Module): """ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. 
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces) """ def __init__(self, config: MambaConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.intermediate_size = config.intermediate_size self.time_step_rank = int(config.time_step_rank) self.layer_idx = layer_idx self.use_conv_bias = config.use_conv_bias self.conv1d = nn.Conv1d( in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.intermediate_size, padding=config.conv_kernel - 1, ) self.activation = config.hidden_act self.act = ACT2FN[config.hidden_act] # projection of the input hidden states self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias) # selective projection used to make dt, B and C input dependent self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False) # time step projection (discretization) self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True) # S4D real initialization. These are not discretized! # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :] A = A.expand(self.intermediate_size, -1).contiguous() self.A_log = nn.Parameter(torch.log(A)) self.D = nn.Parameter(torch.ones(self.intermediate_size)) self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias) self.use_bias = config.use_bias if not is_fast_path_available: logger.warning_once( "The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`" " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and" " https://github.com/Dao-AILab/causal-conv1d" ) def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache] = None): # 1. Gated MLP's linear projection projected_states = self.in_proj(hidden_states).transpose(1, 2) if self.training and cache_params is None: # Doesn't support outputting the states -> used for training contextualized_states = mamba_inner_fn( projected_states, self.conv1d.weight, self.conv1d.bias if self.use_conv_bias else None, self.x_proj.weight, self.dt_proj.weight, self.out_proj.weight, self.out_proj.bias.float() if self.use_bias else None, -torch.exp(self.A_log.float()), None, # input-dependent B None, # input-dependent C self.D.float(), delta_bias=self.dt_proj.bias.float(), delta_softplus=True, ) else: hidden_states, gate = projected_states.chunk(2, dim=1) # 2. 
Convolution sequence transformation conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) if cache_params is not None and cache_params.seqlen_offset > 0: hidden_states = causal_conv1d_update( hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation, ) hidden_states = hidden_states.unsqueeze(-1) else: if cache_params is not None: conv_states = nn.functional.pad( hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0) ) cache_params.conv_states[self.layer_idx].copy_(conv_states) hidden_states = causal_conv1d_fn( hidden_states, conv_weights, self.conv1d.bias, activation=self.activation ) # 3. State Space Model sequence transformation # 3.a. input varying initialization of time_step, B and C ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2) A = -torch.exp(self.A_log.float()) # 3.c perform the recurrence y ← SSM(A, B, C)(x) time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None if cache_params is not None and cache_params.seqlen_offset > 0: scan_outputs = selective_state_update( cache_params.ssm_states[self.layer_idx], hidden_states[..., 0], discrete_time_step[..., 0], A, B[:, 0], C[:, 0], self.D, gate[..., 0], time_proj_bias, dt_softplus=True, ).unsqueeze(-1) else: scan_outputs, ssm_state = selective_scan_fn( hidden_states, discrete_time_step, A, B.transpose(1, 2), C.transpose(1, 2), self.D.float(), gate, time_proj_bias, delta_softplus=True, return_last_state=True, ) if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) # 4. Final linear projection contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) return contextualized_states # fmt: off def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated MLP's linear projection projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len] hidden_states, gate = projected_states.chunk(2, dim=1) # 2. Convolution sequence transformation if cache_params is not None: ssm_state = cache_params.ssm_states[self.layer_idx].clone() if cache_params.seqlen_offset > 0: conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size] conv_state = torch.roll(conv_state, shifts=-1, dims=-1) conv_state[:, :, -1] = hidden_states[:, :, 0] cache_params.conv_states[self.layer_idx].copy_(conv_state) hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1) if self.use_conv_bias: hidden_states += self.conv1d.bias hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) # [batch, intermediate_size, 1] : decoding else: conv_state = nn.functional.pad( hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0) ) cache_params.conv_states[self.layer_idx].copy_(conv_state) hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len] else: ssm_state = torch.zeros( (batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype ) hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len] # 3. 
State Space Model sequence transformation # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2] ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size] discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # [batch, intermediate_size, seq_len] # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM) A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size] discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size] discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediade_size, seq_len, ssm_state_size] deltaB_u = discrete_B * hidden_states[:, :, :, None].float() # 3.c perform the recurrence y ← SSM(A, B, C)(x) scan_outputs = [] for i in range(seq_len): ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] # [batch, intermediade_size, ssm_state] scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) # [batch, intermediade_size, 1] scan_outputs.append(scan_output[:, :, 0]) scan_output = torch.stack(scan_outputs, dim=-1) # [batch, seq_len, intermediade_size] scan_output = scan_output + (hidden_states * self.D[None, :, None]) scan_output = (scan_output * self.act(gate)) if cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) # 4. Final linear projection contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size] return contextualized_states # fmt: on def forward(self, hidden_states, cache_params: Optional[MambaCache] = None): if is_fast_path_available and "cuda" in self.x_proj.weight.device.type: return self.cuda_kernels_forward(hidden_states, cache_params) return self.slow_forward(hidden_states, cache_params) class MambaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ MambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class MambaBlock(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.config = config self.layer_idx = layer_idx self.residual_in_fp32 = config.residual_in_fp32 self.norm = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mixer = MambaMixer(config, layer_idx=layer_idx) def forward(self, hidden_states, cache_params: Optional[MambaCache] = None): residual = hidden_states hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype)) if self.residual_in_fp32: residual = residual.to(torch.float32) hidden_states = self.mixer(hidden_states, cache_params=cache_params) hidden_states = residual + hidden_states return hidden_states class MambaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = MambaConfig base_model_prefix = "backbone" _no_split_modules = ["MambaBlock"] supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, MambaMixer): module.A_log._no_weight_decay = True module.D._no_weight_decay = True dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale if self.config.time_step_init_scheme == "constant": nn.init.constant_(module.dt_proj.weight, dt_init_std) elif self.config.time_step_init_scheme == "random": nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std) dt = torch.exp( torch.rand(self.config.intermediate_size) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min) ).clamp(min=self.config.time_step_floor) # # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 inv_dt = dt + torch.log(-torch.expm1(-dt)) with torch.no_grad(): module.dt_proj.bias.copy_(inv_dt) module.dt_proj.bias._no_reinit = True if isinstance(module, nn.Linear): if module.bias is not None: if not getattr(module.bias, "_no_reinit", False): nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): nn.init.normal_(module.weight, std=self.config.initializer_range) if self.config.rescale_prenorm_residual: # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py for name, p in module.named_parameters(): if name in ["out_proj.weight"]: # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) # We need to reinit p since this code could be called multiple times # Having just p *= scale would repeatedly scale it down nn.init.kaiming_uniform_(p, a=math.sqrt(5)) with torch.no_grad(): p /= math.sqrt(self.config.num_layers) @dataclass class MambaOutput(ModelOutput): """ Class for the MAMBA model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cache_params (`MambaCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: Optional[torch.FloatTensor] = None cache_params: Optional[MambaCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MambaCausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. 
Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cache_params (`MambaCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None cache_params: Optional[MambaCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None MAMBA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MambaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MAMBA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): Indices of input sequence tokens in the vocabulary. If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. cache_params (`MambaCache`, *optional*): If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model add `state_input_ids + input_ids` as context). use_cache (`bool`, *optional*): If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare MAMBA Model transformer outputting raw hidden-states without any specific head on top.", MAMBA_START_DOCSTRING, ) class MambaModel(MambaPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.norm_f = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) # Initialize weights and apply final processing self._register_load_state_dict_pre_hook(self.load_hook) self.post_init() def load_hook(self, state_dict, prefix, *args): for k in state_dict: if "embedding." in k: state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k) break def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embeddings): self.embeddings = new_embeddings @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MambaOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, cache_params: Optional[MambaCache] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # `attention_mask` is passed by the tokenizer and we don't want it ) -> Union[Tuple, MambaOutput]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" ) if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) if self.gradient_checkpointing and self.training and use_cache: use_cache = False if cache_params is None and use_cache: cache_params = MambaCache( self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype ) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None for mixer_block in self.layers: if self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func(mixer_block.__call__, hidden_states, cache_params) else: hidden_states = mixer_block(hidden_states, cache_params=cache_params) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if use_cache: cache_params.seqlen_offset += inputs_embeds.shape[1] hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None) return MambaOutput( last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, hidden_states=all_hidden_states, ) @add_start_docstrings( """ The MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", MAMBA_START_DOCSTRING, ) class MambaForCausalLM(MambaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.backbone = MambaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.backbone.get_input_embeddings() def set_input_embeddings(self, new_embeddings): return self.backbone.set_input_embeddings(new_embeddings) def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs ) -> Dict[str, Any]: model_kwargs["cache_params"] = outputs.get("cache_params", None) return model_kwargs def prepare_inputs_for_generation( self, input_ids, cache_params: Optional[MambaCache] = None, inputs_embeds=None, attention_mask=None, **kwargs ): # only last token for inputs_ids if the state is passed along. if cache_params is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if inputs_embeds is not None and cache_params is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs["cache_params"] = cache_params return model_inputs @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MambaCausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_params: Optional[MambaCache] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, use_cache: Optional[bool] = None, **kwargs, # for now we need this for generation ) -> Union[Tuple, MambaCausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict mamba_outputs = self.backbone( input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, ) hidden_states = mamba_outputs[0] logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float() loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + mamba_outputs[1:] return ((loss,) + output) if loss is not None else output return MambaCausalLMOutput( loss=loss, logits=logits, cache_params=mamba_outputs.cache_params, hidden_states=mamba_outputs.hidden_states, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for GPTSANJapanese.""" import collections import json import os import re from typing import List, Optional, Tuple, Union import numpy as np from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import ( BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} def load_vocab_and_emoji(vocab_file, emoji_file): """Loads a vocabulary file and emoji file into a dictionary.""" with open(emoji_file, "r", encoding="utf-8") as f: emoji = json.loads(f.read()) vocab = collections.OrderedDict() raw_vocab = collections.OrderedDict() ids_to_tokens = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as f: token = f.readlines() token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token] for idx, b in enumerate(token): ids_to_tokens[idx] = b raw_vocab[",".join(b)] = idx for wd in b: vocab[wd] = idx return vocab, raw_vocab, ids_to_tokens, emoji class GPTSanJapaneseTokenizer(PreTrainedTokenizer): """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling - Return token_type_ids for Prefix-LM model The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when decoding In addition, the original Japanese special Sub-Word-Encoding has been released in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a sentence of the prefix part and the part after it as a text pair of batch input. 
Example: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> # You can confirm both 慶応 and 慶應 are encoded to 17750 >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"] [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] >>> # Both 慶応 and 慶應 are decoded to 慶応 >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]) '吾輩は猫である🐯。実は慶応(慶応)大学出身' ``` Example for Prefix-LM: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"] [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] >>> # Mask for Prefix-LM inputs >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"] [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ``` Example for batch encode: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"] [[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]] >>> # Mask for Prefix-LM inputs >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"] [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] >>> # Mask for padding >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"] [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ``` Args: vocab_file (`str`): File containing the vocabulary. emoji_file (`str`): File containing the emoji. unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`): The token used for unknown charactor pad_token (`str`, *optional*, defaults to `"<|separator|>"`): The token used for padding bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`): A special token to separate token to prefix part and general input part. do_clean_text (`bool`, *optional*, defaults to `False`): Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask", "token_type_ids"] def __init__( self, vocab_file, emoji_file, unk_token="<|nottoken|>", pad_token="<|separator|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", sep_token="<|segmenter|>", do_clean_text=False, **kwargs, ): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(emoji_file): raise ValueError( f"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.do_clean_text = do_clean_text self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file) self.subword_tokenizer = SubWordJapaneseTokenizer( vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji ) super().__init__( unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs, ) @property # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size def vocab_size(self): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab def get_vocab(self): return dict(self.raw_vocab, **self.added_tokens_encoder) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize def _tokenize(self, text): return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.subword_tokenizer.convert_id_to_token(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" words = [] byte_tokens = [] for word in tokens: if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) byte_tokens = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word]) elif word == "<SP>": words.append(" ") elif word == "<BR>": words.append("\n") elif word == "<TAB>": words.append("\t") elif word == "<BLOCK>": words.append("▀") elif word == "<KIGOU>": words.append("ǀ") elif word == "<U2000U2BFF>": words.append("‖") elif word == "<|bagoftoken|>": if len(words) > 0: words.append(words[-1]) words.append(words[-1]) words.append(words[-1]) elif word.startswith("<|") and word.endswith("|>"): words.append("") else: words.append(word) if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) text = "".join(words) return text @property def default_chat_template(self): """ A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role information. 
""" return ( "{% for message in messages %}" "{% if not loop.first %}{{ bos_token}}{% endif %}" "{{ sep_token }}{{ message.content }} {{ eos_token }}" "{% endfor %}" ) # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: vocab_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) emoji_file = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(vocab_file, "w", encoding="utf-8") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(",".join(token) + "\n") index += 1 with open(emoji_file, "w", encoding="utf-8") as writer: json.dump(self.emoji, writer) return vocab_file, emoji_file def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: # docstyle-ignore """ The tokenizer returns token_type_ids as separators between the Prefix part and the rest. token_type_ids is 1 for the Prefix part and 0 for the rest of the token. Example: ```python >>> from transformers import GPTSanJapaneseTokenizer >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> x_token = tokenizer("アイウエ") >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ | >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 | >>> x_token = tokenizer("", prefix_text="アイウエ") >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG | >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 | >>> x_token = tokenizer("ウエ", prefix_text="アイ") >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ | >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 | ```""" prefix_len = 0 if self.sep_token in self.vocab: segid = self.vocab[self.sep_token] if segid in token_ids_0: prefix_len = token_ids_0.index(segid) if token_ids_1 is None: total_len = len(token_ids_0) else: total_len = len(token_ids_0 + token_ids_1) return prefix_len * [1] + (total_len - prefix_len) * [0] def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs): # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation. # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest. 
if add_sep_token is None: add_sep_token = self.sep_token not in text # If insert un-prefix position explicitly prepared = self.bos_token if self.bos_token in self.vocab else "" prepared += prefix_text if prefix_text is not None else "" if add_sep_token: prepared += self.sep_token if self.sep_token in self.vocab else "" prepared += text return (prepared, kwargs) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: # This tokenizer converts input text pairs into Prefix input and subsequent input if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list): # As a single text with an explicit un-prefix position batch_prefix_texts = [] for pref, txt in batch_text_or_text_pairs: batch_prefix_texts.append(pref + self.sep_token + txt) batch_text_or_text_pairs = batch_prefix_texts return super()._batch_encode_plus( batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, ) class SubWordJapaneseTokenizer(object): """ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications - Decoding byte0~byte255 tokens correctly - Added bagofword token handling https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT Lisence according to the original repository. MIT License Copyright (c) 2020 tanreinama Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__ def __init__(self, vocab, ids_to_tokens, emoji): self.vocab = vocab # same as swe self.ids_to_tokens = ids_to_tokens # same as bpe self.emoji = emoji self.maxlen = np.max([len(w) for w in self.vocab.keys()]) self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") self.content_repatter4 = re.compile( r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) self.content_repatter5 = re.compile( r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) self.content_repatter6 = re.compile( r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks}) # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__ def __len__(self): return len(self.ids_to_tokens) # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text def clean_text(self, content): content = self.content_repatter1.sub("<URL>", content) content = self.content_repatter2.sub("<EMAIL>", content) content = self.content_repatter3.sub("<TEL>", content) content = self.content_repatter4.sub("<DATE>", content) content = self.content_repatter5.sub("<DATE>", content) content = self.content_repatter6.sub("<PRICE>", content) content = content.translate(self.content_trans1) while "<BLOCK><BLOCK>" in content: content = content.replace("<BLOCK><BLOCK>", "<BLOCK>") return content # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize def tokenize(self, text, clean=False): text = text.replace(" ", "<SP>") text = text.replace(" ", "<SP>") text = text.replace("\r\n", "<BR>") text = text.replace("\n", "<BR>") text = text.replace("\r", "<BR>") text = text.replace("\t", "<TAB>") text = text.replace("—", "ー") text = text.replace("−", "ー") for k, v in self.emoji["emoji"].items(): if k in text: text = text.replace(k, v) if clean: text = self.clean_text(text) def check_simbol(x): e = x.encode() if len(x) == 1 and len(e) == 2: c = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0xC2A1 and c <= 0xC2BF) or (c >= 0xC780 and c <= 0xC783) or (c >= 0xCAB9 and c <= 0xCBBF) or (c >= 0xCC80 and c <= 0xCDA2) ): return True return False def checku2e(x): e = x.encode() if len(x) == 1 and len(e) == 3: c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 0xE28080 and c <= 0xE2B07F: return True return False pos = 0 result = [] while pos < len(text): end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 candidates = [] # (token_id, token, pos) for e in range(end, pos, -1): wd = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(wd) > 2: candidates = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if 
len(candidates) > 0: # the smallest token_id is adopted _, wd, e = sorted(candidates, key=lambda x: x[0])[0] result.append(wd) pos = e else: end = pos + 1 wd = text[pos:end] if check_simbol(wd): result.append("<KIGOU>") elif checku2e(wd): result.append("<U2000U2BFF>") else: for i in wd.encode("utf-8"): result.append("<|byte%d|>" % i) pos = end return result def convert_id_to_token(self, index): return self.ids_to_tokens[index][0]
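# --- Illustrative sketch (not part of the original module) -------------------------------------
# The helper below mirrors the "<|byteN|>" fallback used by `SubWordJapaneseTokenizer.tokenize`
# and `GPTSanJapaneseTokenizer.convert_tokens_to_string`: characters that cannot be matched in the
# vocabulary are emitted as one token per UTF-8 byte, and consecutive byte tokens are buffered and
# decoded back into a single string. The function name is hypothetical and only meant to make the
# round trip explicit.
def _demo_decode_byte_tokens(tokens):
    words, byte_buffer = [], []
    for token in tokens:
        if token.startswith("<|byte") and token.endswith("|>"):
            byte_buffer.append(int(token[6:-2]))
        else:
            if byte_buffer:
                words.append(bytearray(byte_buffer).decode("utf-8", errors="replace"))
                byte_buffer = []
            words.append(token)
    if byte_buffer:
        words.append(bytearray(byte_buffer).decode("utf-8", errors="replace"))
    return "".join(words)


if __name__ == "__main__":
    # "猫" is U+732B, i.e. the UTF-8 bytes 0xE7 0x8C 0xAB (231, 140, 171).
    assert _demo_decode_byte_tokens(["<|byte231|>", "<|byte140|>", "<|byte171|>"]) == "猫"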
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
# coding=utf-8 # Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch GPTSANJapanese model.""" import copy from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from ...activations import ACT2FN from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, ) from .configuration_gptsan_japanese import GPTSanJapaneseConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GPTSanJapaneseConfig" _CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func def router_z_loss_func(router_logits: torch.Tensor) -> float: r""" Compute the router z-loss implemented in PyTorch. The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906). It encourages router logits to remain small in an effort to improve stability. Args: router_logits (`float`): Input logits of shape [batch_size, sequence_length, num_experts] Returns: Scalar router z-loss. """ num_groups, tokens_per_group, _ = router_logits.shape log_z = torch.logsumexp(router_logits, dim=-1) z_loss = log_z**2 return torch.sum(z_loss) / (num_groups * tokens_per_group) # Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: router_probs (`torch.Tensor`): Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts]. expert_indices (`torch.Tensor`): Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token. Returns: The auxiliary loss. 
""" num_experts = router_probs.shape[-1] # cast the expert indices to int64, otherwise one-hot encoding will fail if expert_indices.dtype != torch.int64: expert_indices = expert_indices.to(torch.int64) if len(expert_indices.shape) == 2: expert_indices = expert_indices.unsqueeze(2) expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts) # For a given token, determine if it was routed to a given expert. expert_mask = torch.max(expert_mask, axis=-2).values # cast to float32 otherwise mean will fail expert_mask = expert_mask.to(torch.float32) tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) class GPTSanJapaneseDenseActDense(nn.Module): """ FFN Layer for Switch Transformer and Extra layers GPTSAN can mix Switch Transformer layers and normal Transformer layers This class is used as Expert in Switch Transformer layers and as FFN in regular Transformer layers. RELU is used in the Switch Transformer layer, and Swish is used in the normal Transformer layer, so there is a choice of which is used in the argument. """ def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False): super().__init__() d_inter = config.d_ext if ext_layer else config.d_ff self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer) self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer) self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate) self.act = ACT2FN["swish" if ext_layer else "relu"] def forward(self, hidden_states): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese class GPTSanJapaneseTop1Router(nn.Module): """ Router using tokens choose top-1 experts assignment. This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. """ def __init__(self, config: GPTSanJapaneseConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.jitter_noise = config.router_jitter_noise self.ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r""" Computes router probabilities from input hidden states. Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: router_probabilities (`torch.Tensor`): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. 
Used for routing tokens to experts. router_logits (`torch.Tensor`): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. """ # float32 is used to ensure stability. See the discussion of "selective precision" in # https://arxiv.org/abs/2101.03961. # We also store the previous dtype to cast back the output to the previous dtype self.input_dtype = hidden_states.dtype hidden_states = hidden_states.to(self.dtype) if self.training and self.jitter_noise > 0: # Multiply the token inputs by the uniform distribution - adding some noise hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() router_logits = self.classifier(hidden_states) # Apply Softmax and cast back to the original `dtype` router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) return router_probabilities, router_logits def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. """ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def forward(self, hidden_states: torch.Tensor) -> Tuple: r""" Generic forward function for every Router class. Each Router expects to have the same input hidden states (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the number of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert. Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned to an expert. Then each Router class will have to define its own `_compute_routing_instructions`. Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs and the router logits. The router probabilities and logits are required to compute the loss. """ router_probs, router_logits = self._compute_router_probabilities(hidden_states) expert_index = torch.argmax(router_probs, dim=-1) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) # Mask tokens outside expert capacity. Sum over each sequence token_priority = torch.cumsum(expert_index, dim=-2) # mask if the token routed to to the expert will overflow expert_capacity_mask = token_priority <= self.expert_capacity expert_index = expert_index * expert_capacity_mask router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) return expert_index, router_probs, router_logits # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese class GPTSanJapaneseSparseMLP(nn.Module): r""" Implementation of the Switch Transformers Sparse MLP module. 
""" def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense): super().__init__() # Step 1: Get the correct router according to its class self.router = GPTSanJapaneseTop1Router(config) # Step 2: Get the experts self.experts = nn.ModuleDict() for idx in range(config.num_experts): self.experts[f"expert_{idx}"] = expert_class(config) def forward(self, hidden_states): r""" Hold on, this will be slightly tricky to understand In the correct order, a MoE layer does the following: 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)` and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor). 2- Dispatch the tokens to its associated experts. We do a classic for loop over the experts and assign for each expert the corresponding hidden states. """ # Step 1: Get the router_mask from the router as wel as the probabilities router_mask, router_probs, router_logits = self.router(hidden_states) expert_index = torch.argmax(router_mask, dim=-1) # The routers introduced might not always map all the tokens, to a router, which means that some hidden states # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the seleced ones. next_states = hidden_states.clone() for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, :, idx].bool() next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) class GPTSanJapaneseLayerSparseFF(nn.Module): r""" Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module. Parameters: config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ def __init__(self, config: GPTSanJapaneseConfig): super().__init__() self.mlp = GPTSanJapaneseSparseMLP(config) self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward(self, hidden_states, output_router_logits): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. output_router_logits (`bool`) : output experts router output. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ forwarded_states, router_tuple = self.mlp(hidden_states) forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states)) output = hidden_states + self.norm(forwarded_states) if output_router_logits and router_tuple is not None: return output, router_tuple else: return output class GPTSanJapaneseLayerDenseFF(nn.Module): r""" Extra Transformers Feed Forward layer module. Parameters: config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" def __init__(self, config: GPTSanJapaneseConfig): super().__init__() # Check if it is a sparse layer, if not then it is a dense layer self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward(self, hidden_states): r""" Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim] """ forwarded_states = self.mlp(hidden_states) output = hidden_states + self.norm(forwarded_states) return output # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese class GPTSanJapaneseAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[GPTSanJapaneseConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) 
if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class GPTSanJapaneseLayerSelfAttention(nn.Module): """ Self Attention and Normalization Unit """ def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.self_attn = GPTSanJapaneseAttention( embed_dim=config.d_model, num_heads=config.num_heads, is_decoder=True, bias=has_relative_attention_bias, ) self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: r""" Self-attention and normalize block. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
""" # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, layer_head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_weights = (atten_out[1],) else: attn_weights = () attention_output = atten_out[0] hidden = hidden_states + self.norm(attention_output) if use_cache: outputs = (hidden, atten_out[2]) # hidden, present, (attentions) else: outputs = (hidden,) # hidden, (attentions) return outputs + attn_weights class GPTSanJapaneseBlock(nn.Module): """ Self Attention and FFN Unit """ def __init__(self, config, ext_layer=False): super().__init__() self.self_attn = GPTSanJapaneseLayerSelfAttention(config) self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, output_router_tuple: Optional[bool] = False, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: r""" GPTSAN transformer block. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`) : output attention probabirities. output_router_tuple: output experts router logits and expert id. Returns: Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
""" atten_out = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attention_output = atten_out[0] if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF): sparse_out = self.feed_forward(attention_output, output_router_tuple) if output_router_tuple: hidden, router_tuple = sparse_out else: hidden = sparse_out else: hidden = self.feed_forward(attention_output) outputs = (hidden,) + atten_out[1:] if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple: outputs += (router_tuple,) return outputs class GPTSanJapanesePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTSanJapaneseConfig base_model_prefix = "gptsan_japanese" supports_gradient_checkpointing = False _no_split_modules = ["GPTSanJapaneseBlock"] _skip_keys_device_placement = "past_key_values" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, nn.LayerNorm): module.weight.data.fill_(factor * 1.0) module.bias.data.zero_() elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseModel): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0) module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None: module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, GPTSanJapaneseDenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, GPTSanJapaneseAttention): # Multi-headed attention d_model = self.config.d_model 
key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) elif isinstance(module, GPTSanJapaneseSparseMLP): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_model n_heads = self.config.num_heads module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " "See T5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids GPTSAN_JAPANESE_START_DOCSTRING = r""" The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in General-purpose Swich transformer based Japanese language model This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPTSAN_JAPANESE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are automatically appended. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`: - 1 for tokens that are **prefix** input, - 0 for tokens that are **not-prefix** input. spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`): This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. 
""" @add_start_docstrings( "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.", GPTSAN_JAPANESE_START_DOCSTRING, ) class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel): def __init__(self, config: GPTSanJapaneseConfig): super().__init__(config) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) self.config = copy.deepcopy(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.last_project = nn.Linear(config.d_model, config.d_model, bias=True) self.act = ACT2FN["swish"] self.blocks = torch.nn.ModuleList([]) for _ in range(config.num_switch_layers): self.blocks.append(GPTSanJapaneseBlock(config)) for _ in range(config.num_ext_layers): self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True)) if config.num_ext_layers > 0: self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) if config.d_spout: spouts = [] for _ in range(8): spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False)) spouts.append(nn.Tanh()) spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False)) self.spout = nn.Sequential(*spouts) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.FloatTensor] = None, spout: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, output_router_logits: Optional[bool] = None, num_precontext: Optional[torch.LongTensor] = None, ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" num_precontext (`torch.LongTensor` of shape `(batch_size,1)`): length of `hybrid` input tokens in the input. Tokens up to this length refer to both front and back like BERT, tokens after that refer only to front like GPT. see also: https://github.com/tanreinama/GPTSAN/blob/main/report/model.md Returns: `MoEModelOutputWithPastAndCrossAttentions` or `tuple` if `return_dict` returns MoEModelOutputWithPastAndCrossAttentions insted of tuple """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict device = self.position_embeddings.weight.device if input_ids is None: input_ids = torch.zeros([1, 1]).int().to(device) # dummy for input_ids was None if inputs_embeds is not None: raise NotImplementedError( "GPTSanJapaneseModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead." ) num_pasts_contexts = 0 num_batch = input_ids.shape[0] pasts_or_spout_value = None if past_key_values is not None: num_pasts_contexts = past_key_values[0][0].shape[2] elif self.config.d_spout and spout is not None: # `spout` is a special input vector specific to GPTSAN # This controls the output by projecting embedded information such as the class of sentences during learning. # It should passed instead of the first past_key_value. 
            # See the original GPTSAN repository for details
            num_pasts_contexts += 1

        # If there is an attention_mask, increase first one for spout
        if self.config.d_spout and spout is not None and attention_mask is not None:
            attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
            attention_mask_with_spout[:, 1:] -= 1 - attention_mask  # 1st token should be spout
            attention_mask = attention_mask_with_spout  # update attention_mask

        if num_precontext is not None:
            # `num_precontext` is the number of tokens that refer to each other in prefix-lm
            # created per batch, so dimension of num_precontext should be [batch, 1]
            if not (
                len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1
            ):  # num_precontext should be [batch, 1]
                raise ValueError("num_precontext should be [batch, 1] size.")
            num_precontext = torch.reshape(num_precontext, [-1])
        else:
            num_precontext = torch.zeros([num_batch]).int().to(device)

        num_input_contexts = input_ids.shape[1]
        num_output_contexts = num_input_contexts + num_pasts_contexts

        hidden_states = self.embed_tokens(input_ids)

        if past_key_values is not None:
            pasts_or_spout_value = past_key_values
        elif self.config.d_spout and spout is not None:
            # Make vector from `spout` of GPTSAN to the same shape as past_key_values
            pasts_or_spout_value = self.spout(spout)  # projecting `spout` vector
            pasts_or_spout_value = torch.reshape(
                pasts_or_spout_value,
                [
                    num_batch,
                    self.config.num_layers,
                    2,
                    self.config.num_heads,
                    num_pasts_contexts,
                    self.config.d_model // self.config.num_heads,
                ],
            )
            pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
            # make same shape as past_key_values
            pasts_or_spout_value = tuple(
                tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value
            )
        else:
            pasts_or_spout_value = [None] * self.config.num_layers

        # Token position considering spout and pasts
        token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts

        if attention_mask is None:
            attention_mask = torch.ones(num_batch, num_input_contexts, device=device)

        # positions for get position_embeddings
        gather_position = (
            (
                torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device)
                + token_position.unsqueeze(0)
            )
            .transpose(1, 2)
            .long()
        )
        # When padding with padding_side="left", zeros line up on the left side of attention_mask,
        # so position_embeddings is shifted accordingly
        gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
        gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)

        # attention_mask is applied per batch
        for i in range(num_batch):
            hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])

        # Create a mask to be used when making the prefix Input length of Prefix-LM variable
        causal_mask = (
            torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8))
            .view(1, 1, num_output_contexts, num_output_contexts)
            .to(device)
        )
        prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
        if token_type_ids is not None:
            token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
            prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float()
        # Merge prefix_lm_mask and attention_mask
        extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)

        # Prepare head mask if needed
        if head_mask is not None:
            head_mask = self.get_head_mask(
                head_mask, self.config.num_switch_layers + self.config.num_ext_layers
            )  # n_layer x batch x n_heads x N x N
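
        # Only the first `num_switch_layers` blocks route tokens through experts; router outputs are collected
        # for those layers only, and the extra position embeddings are added once the extension layers begin.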
        # outputs
        present_key_value_states = () if self.config.use_cache or use_cache else None
        all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
        all_attentions = () if self.config.output_attentions or output_attentions else None
        all_router_probs = () if self.config.output_router_logits or output_router_logits else None

        for layer, past in enumerate(pasts_or_spout_value):
            if layer == self.config.num_switch_layers:
                if self.config.num_ext_layers > 0:
                    # extra_position_embeddings are extra position embeddings that are only created when extending
                    # the model with code from the original GPTSAN repository. Not used in the default model.
                    # However, it is created when you create an additional layer and partially train only that location.
                    # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading
                    # models created in the original GPTSAN repository.
                    for i in range(num_batch):
                        hidden_states[i] += torch.gather(
                            self.extra_position_embeddings.weight, dim=0, index=gather_position[i]
                        )

            output_router_tuple = (
                self.config.output_router_logits or output_router_logits
            ) and layer < self.config.num_switch_layers
            block_output = self.blocks[layer](
                hidden_states=hidden_states,
                past_key_value=past,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                use_cache=self.config.use_cache or use_cache,
                output_attentions=self.config.output_attentions or output_attentions,
                output_router_tuple=output_router_tuple,
            )

            outpos = 0
            hidden_states = block_output[outpos]
            if self.config.output_hidden_states or output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.config.use_cache or use_cache:
                outpos += 1
                present = block_output[outpos]
                present_key_value_states += (present,)
            if self.config.output_attentions or output_attentions:
                outpos += 1
                attention_probs = block_output[outpos]
                all_attentions += (attention_probs,)
            if output_router_tuple:
                outpos += 1
                router_tuple = block_output[outpos]
                # accumulate as a tuple, consistent with the other collectors above
                all_router_probs += (router_tuple[0],)

        hidden_states = self.last_project(hidden_states)
        hidden_states = self.act(hidden_states)

        if self.config.output_hidden_states or output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    present_key_value_states,
                    all_hidden_states,
                    all_attentions,
                    all_router_probs,
                ]
                if v is not None
            )

        return MoEModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=present_key_value_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            router_probs=all_router_probs,
        )


@add_start_docstrings(
    "The bare GPTSAN-japanese Model with a language modeling head.",
    GPTSAN_JAPANESE_START_DOCSTRING,
)
class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__(config)
        self.model = GPTSanJapaneseModel(config)
        self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size]))
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        if not self.config.torchscript:
            self.lm_head.weight = self.model.embed_tokens.weight

    @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.FloatTensor] = None,
        spout: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should be in
            `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only
            computed for labels in `[0, ..., config.vocab_size - 1]`.

        Returns:
            `MoECausalLMOutputWithPast` if `return_dict=True`, otherwise a plain `tuple`.

        Example:

        Text Generation with regular LM Model
        ```python
        >>> from transformers import AutoModel, AutoTokenizer, trainer_utils

        >>> device = "cuda"
        >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
        >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
        >>> x_token = tokenizer("織田信長は、", return_tensors="pt")
        >>> trainer_utils.set_seed(30)
        >>> input_ids = x_token.input_ids.to(device)
        >>> gen_token = model.generate(input_ids, max_new_tokens=50)
        >>> tokenizer.decode(gen_token[0])
        "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
        ```

        Text Generation with Prefix-LM Model
        ```python
        >>> from transformers import AutoModel, AutoTokenizer, trainer_utils

        >>> device = "cuda"
        >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
        >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
        >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
        >>> trainer_utils.set_seed(30)
        >>> input_ids = x_token.input_ids.to(device)
        >>> token_type_ids = x_token.token_type_ids.to(device)
        >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
        >>> tokenizer.decode(gen_token[0])
        "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
        ```

        Simultaneously Text Generation And Masked Language Model
        ```python
        >>> from transformers import AutoModel, AutoTokenizer, trainer_utils

        >>> device = "cuda"
        >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
        >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
        >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
        >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
        >>> trainer_utils.set_seed(30)
        >>> input_ids = x_token.input_ids.to(device)
        >>> token_type_ids = x_token.token_type_ids.to(device)
        >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
        >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
        >>> tokenizer.decode(out_mlm_token[0])
        "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"

        >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
        "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..."
        ```"""
        SEG_TOKEN = self.config.separator_token_id
        use_cache = use_cache or self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        model_return_dict = True
        num_precontext = None
        if input_ids is not None:
            num_batch = input_ids.shape[0]
            num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
            where_separators = torch.where(input_ids == SEG_TOKEN)
            num_precontext[where_separators[0]] += where_separators[1]
            num_precontext = num_precontext.unsqueeze(1)

        outputs = self.model(
            input_ids,
            attention_mask,
            token_type_ids,
            spout,
            past_key_values,
            head_mask,
            use_cache,
            inputs_embeds,
            decoder_inputs_embeds,
            output_attentions,
            output_hidden_states,
            model_return_dict,
            output_router_logits,
            num_precontext,
        )

        lm_logits = self.lm_head(outputs[0])
        if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
            lm_logits = lm_logits + self.final_logits_bias

        loss = None
        z_loss = None
        router_probs = None
        aux_loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)

            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)

            if output_router_logits:
                # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
                router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs)
                z_loss = router_z_loss_func(router_logits)
                router_probs = nn.Softmax(dim=-1)(router_logits)
                aux_loss = load_balancing_loss_func(router_probs, expert_indexes)

            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))

        if not return_dict:
            return tuple(
                v
                for v in [
                    loss,
                    lm_logits,
                    outputs.past_key_values,
                    outputs.hidden_states,
                    outputs.router_probs,
                    z_loss,
                    aux_loss,
                ]
                if v is not None
            )

        return MoECausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_probs,
            z_loss=z_loss,
            aux_loss=aux_loss,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        attention_mask: torch.FloatTensor,
        token_type_ids: Optional[torch.FloatTensor] = None,
        spout: Optional[Union[List, torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        **kwargs,
    ):
        if isinstance(spout, list):
            spout = torch.tensor(spout).float()
            if input_ids is not None:
                spout = spout.to(input_ids.device)
        if past_key_values is not None:
            return {
                "input_ids": input_ids[:, -1:] if input_ids is not None else None,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None,
                "spout": spout,
                "past_key_values": past_key_values,
            }
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "spout": spout,
            "past_key_values": None,
        }

    # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese
    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return self._shift_right(labels)

    # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese
    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings
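
    # `final_logits_bias` is resized together with the token embeddings so that the bias always matches the
    # vocabulary size produced by `lm_head`.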
    # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese
    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.model.set_input_embeddings(new_embeddings)

    # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese
    def get_output_embeddings(self):
        return self.lm_head

    # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese
    def _unpack_router_logits(self, router_outputs):
        total_router_logits = []
        total_expert_indexes = []
        for router_output in router_outputs:
            if len(router_output[0].shape) > 1:
                router_logits, expert_indexes = router_output
                total_router_logits.append(router_logits)
                total_expert_indexes.append(expert_indexes)
        return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gptsan_japanese/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"],
    "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gptsan_japanese"] = [
        "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTSanJapaneseForConditionalGeneration",
        "GPTSanJapaneseModel",
        "GPTSanJapanesePreTrainedModel",
    ]
    _import_structure["tokenization_gptsan_japanese"] = [
        "GPTSanJapaneseTokenizer",
    ]


if TYPE_CHECKING:
    from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig
    from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gptsan_japanese import (
            GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTSanJapaneseForConditionalGeneration,
            GPTSanJapaneseModel,
            GPTSanJapanesePreTrainedModel,
        )
        from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0