| | """ PyTorch Husky model.""" |
| |
|
| | import contextlib |
| | import math |
| | from dataclasses import dataclass |
| | from typing import Any, Optional, Tuple, Union |
| |
|
| | import torch |
| | import torch.utils.checkpoint |
| | from torch import nn |
| | from torch.nn import CrossEntropyLoss |
| |
|
| | from transformers.activations import ACT2FN |
| | from transformers.modeling_outputs import ( |
| | BaseModelOutput, |
| | BaseModelOutputWithPastAndCrossAttentions, |
| | BaseModelOutputWithPooling, |
| | BaseModelOutputWithPoolingAndCrossAttentions, |
| | ) |
| | from transformers.modeling_utils import PreTrainedModel |
| | from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer |
| | from transformers.utils import ( |
| | ModelOutput, |
| | add_start_docstrings, |
| | add_start_docstrings_to_model_forward, |
| | logging, |
| | replace_return_docstrings, |
| | ) |
| | from transformers import AutoModelForCausalLM, GenerationConfig |
| |
|
| | from .configuration_husky import HuskyConfig, HuskyQFormerConfig, HuskyVisionConfig |
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| | _CHECKPOINT_FOR_DOC = "wofmanaf/husky-7b" |
| |
|
| | HUSKY_PRETRAINED_MODEL_ARCHIVE_LIST = [ |
| | "wofmanaf/husky-7b", |
| | ] |
| |
|
| | @dataclass |
| | class HuskyForConditionalGenerationModelOutput(ModelOutput): |
| | """ |
| | Class defining the outputs of [`HuskyForConditionalGeneration`]. |
| | |
| | Args: |
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
| | Language modeling loss from the language model. |
| | logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): |
| | Prediction scores of the language modeling head of the language model. |
| | vision_outputs (`BaseModelOutputWithPooling`): |
| | Outputs of the vision encoder. |
| | qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): |
| | Outputs of the Q-Former (Querying Transformer). |
| | language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): |
| | Outputs of the language model. |
| | """ |
| |
|
| | loss: Optional[Tuple[torch.FloatTensor]] = None |
| | logits: Optional[Tuple[torch.FloatTensor]] = None |
| | vision_outputs: Optional[torch.FloatTensor] = None |
| | qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None |
| | language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None |
| |
|
| | def to_tuple(self) -> Tuple[Any]: |
| | return tuple( |
| | self[k] |
| | if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"] |
| | else getattr(self, k).to_tuple() |
| | for k in self.keys() |
| | ) |
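    # Note: `to_tuple` also converts the nested vision / Q-Former / language-model outputs to
    # tuples, so no `ModelOutput` instances remain in the flattened result.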
| |
|
| | |
| | class HuskyVisionEmbeddings(nn.Module): |
| | def __init__(self, config: HuskyVisionConfig): |
| | super().__init__() |
| | self.config = config |
| | self.embed_dim = config.hidden_size |
| | self.image_size = config.image_size |
| | self.patch_size = config.patch_size |
| |
|
| | self.class_embedding = nn.Parameter( |
| | torch.randn(1, 1, self.embed_dim), |
| | ) |
| |
|
| | self.patch_embedding = nn.Conv2d( |
| | in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size |
| | ) |
| |
|
| | self.num_patches = (self.image_size // self.patch_size) ** 2 |
| | self.num_positions = self.num_patches + 1 |
| |
|
| | self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) |
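    # Worked sizing example (illustrative numbers, not from any particular checkpoint): with
    # image_size=224 and patch_size=14, the Conv2d above yields (224 // 14) ** 2 = 256 patch
    # tokens per image; prepending the class token gives num_positions = 257, the sequence
    # length of the learned position embedding (1, 257, embed_dim).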
| |
|
| | def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: |
| | batch_size = pixel_values.shape[0] |
| | target_dtype = self.patch_embedding.weight.dtype |
| | patch_embeds = self.patch_embedding(pixel_values) |
| | patch_embeds = patch_embeds.flatten(2).transpose(1, 2) |
| |
|
| | class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) |
| | embeddings = torch.cat([class_embeds, patch_embeds], dim=1) |
| | embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) |
| | return embeddings |
| |
|
| | class HuskyVideoEmbeddings(nn.Module): |
| | def __init__(self, config: HuskyVisionConfig): |
| | super().__init__() |
| | self.config = config |
| | self.embed_dim = config.hidden_size |
| | self.image_size = config.image_size |
| | self.patch_size = config.patch_size |
| | self.num_frames = getattr(self.config, "num_frames", 8) |
| | self.frame_stride = getattr(self.config, "frame_stride", 2) |
| |
|
| | self.class_embedding = nn.Parameter( |
| | torch.randn(1, 1, self.embed_dim), |
| | ) |
| |
|
| | self.patch_embedding = nn.Conv3d( |
| | in_channels=3, out_channels=self.embed_dim, |
| | kernel_size=(self.frame_stride, self.patch_size, self.patch_size), |
| | stride=(self.frame_stride, self.patch_size, self.patch_size) |
| | ) |
| |
|
| | self.num_patches = int(self.num_frames // self.frame_stride) * (self.image_size // self.patch_size) ** 2 |
| | self.num_positions = self.num_patches + 1 |
| |
|
| | self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) |
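    # Worked sizing example (illustrative numbers): with num_frames=8, frame_stride=2,
    # image_size=224 and patch_size=14, the Conv3d produces (8 // 2) * (224 // 14) ** 2 = 1024
    # patch tokens, so num_positions = 1025 including the class token.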
| |
|
| | def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: |
| | batch_size = pixel_values.shape[0] |
| | target_dtype = self.patch_embedding.weight.dtype |
| | patch_embeds = self.patch_embedding(pixel_values) |
| | patch_embeds = patch_embeds.flatten(2).transpose(1, 2) |
| |
|
| | class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) |
| | embeddings = torch.cat([class_embeds, patch_embeds], dim=1) |
| | embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) |
| | return embeddings |
| |
|
| | class HuskyAttention(nn.Module): |
| | """Multi-headed attention from 'Attention Is All You Need' paper""" |
| |
|
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.embed_dim = config.hidden_size |
| | self.num_heads = config.num_attention_heads |
| | self.head_dim = self.embed_dim // self.num_heads |
| | if self.head_dim * self.num_heads != self.embed_dim: |
| | raise ValueError( |
| | f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
| | f" {self.num_heads})." |
| | ) |
| | self.scale = self.head_dim ** -0.5 |
| | self.dropout = nn.Dropout(config.attention_dropout) |
| |
|
| | |
| | self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) |
| |
|
| | if config.qkv_bias: |
| | q_bias = nn.Parameter(torch.zeros(self.embed_dim)) |
| | v_bias = nn.Parameter(torch.zeros(self.embed_dim)) |
| | else: |
| | q_bias = None |
| | v_bias = None |
| |
|
| | if q_bias is not None: |
| | qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) |
| | self.qkv.bias = nn.Parameter(qkv_bias) |
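        # Note: only the query and value projections receive learnable biases; the key bias
        # (the middle third of the concatenated vector above) is fixed at zero.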
| |
|
| | self.projection = nn.Linear(self.embed_dim, self.embed_dim) |
| |
|
| | def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
| | return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | head_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
| | """Input shape: Batch x Time x Channel""" |
| |
|
| | bsz, tgt_len, embed_dim = hidden_states.size() |
| |
|
| | mixed_qkv = self.qkv(hidden_states) |
| |
|
| | mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute( |
| | 2, 0, 3, 1, 4 |
| | ) |
| | query_states, key_states, value_states = ( |
| | mixed_qkv[0], |
| | mixed_qkv[1], |
| | mixed_qkv[2], |
| | ) |
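        # Shape walkthrough: `hidden_states` is (bsz, tgt_len, embed_dim); the fused qkv
        # projection gives (bsz, tgt_len, 3 * embed_dim), the reshape/permute above yields
        # (3, bsz, num_heads, tgt_len, head_dim), and each of the query/key/value states is
        # therefore (bsz, num_heads, tgt_len, head_dim).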
| |
|
        # Take the dot product between "query" and "key" to get the raw attention scores.
| | attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) |
| |
|
| | attention_scores = attention_scores * self.scale |
| |
|
        # Normalize the attention scores to probabilities.
| | attention_probs = nn.functional.softmax(attention_scores, dim=-1) |
| |
|
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
| | attention_probs = self.dropout(attention_probs) |
| |
|
        # Mask heads if we want to.
| | if head_mask is not None: |
| | attention_probs = attention_probs * head_mask |
| |
|
| | context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) |
| |
|
| | new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) |
| | context_layer = context_layer.reshape(new_context_layer_shape) |
| |
|
| | output = self.projection(context_layer) |
| |
|
| | outputs = (output, attention_probs) if output_attentions else (output, None) |
| |
|
| | return outputs |
| |
|
| | |
| | class HuskyMLP(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.activation_fn = ACT2FN[config.hidden_act] |
| | self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) |
| | self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) |
| |
|
| | def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| | hidden_states = self.fc1(hidden_states) |
| | hidden_states = self.activation_fn(hidden_states) |
| | hidden_states = self.fc2(hidden_states) |
| | return hidden_states |
| |
|
| | |
| | class HuskyEncoderLayer(nn.Module): |
| | def __init__(self, config: HuskyConfig): |
| | super().__init__() |
| | self.embed_dim = config.hidden_size |
| | self.self_attn = HuskyAttention(config) |
| | self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) |
| | self.mlp = HuskyMLP(config) |
| | self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | attention_mask: torch.Tensor, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[torch.FloatTensor]: |
| | """ |
| | Args: |
| | hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| | returned tensors for more detail. |
| | """ |
| | residual = hidden_states |
| |
|
| | hidden_states = self.layer_norm1(hidden_states) |
| | hidden_states, attn_weights = self.self_attn( |
| | hidden_states=hidden_states, |
| | head_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | ) |
| | hidden_states = hidden_states + residual |
| | residual = hidden_states |
| | hidden_states = self.layer_norm2(hidden_states) |
| | hidden_states = self.mlp(hidden_states) |
| |
|
| | hidden_states = hidden_states + residual |
| |
|
| | outputs = (hidden_states,) |
| |
|
| | if output_attentions: |
| | outputs += (attn_weights,) |
| |
|
| | return outputs |
| |
|
| | class HuskyPreTrainedModel(PreTrainedModel): |
| | """ |
| | An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| | models. |
| | """ |
| |
|
| | config_class = HuskyConfig |
| | base_model_prefix = "husky" |
| | supports_gradient_checkpointing = True |
| | _keys_to_ignore_on_load_missing = [ |
| | r"position_ids", |
| | r"language_model.encoder.embed_tokens.weight", |
| | r"language_model.decoder.embed_tokens.weight", |
| | r"language_model.lm_head.weight", |
| | ] |
| | _no_split_modules = ["HuskyAttention", "LlamaDecoderLayer", "LlamaForCausalLM"] |
| | _skip_keys_device_placement = "past_key_values" |
| | _keep_in_fp32_modules = ["wo"] |
| |
|
| | def _init_weights(self, module): |
| | """Initialize the weights""" |
| | factor = self.config.initializer_range |
| | if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): |
| | module.weight.data.normal_(mean=0.0, std=factor) |
| | if hasattr(module, "bias") and module.bias is not None: |
| | module.bias.data.zero_() |
| |
|
| | if isinstance(module, HuskyVisionEmbeddings): |
| | if hasattr(self.config, "vision_config"): |
| | factor = self.config.vision_config.initializer_range |
| | nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) |
| | nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) |
| |
|
| | elif isinstance(module, nn.LayerNorm): |
| | module.bias.data.zero_() |
| | module.weight.data.fill_(1.0) |
| | elif isinstance(module, nn.Linear) and module.bias is not None: |
| | module.bias.data.zero_() |
| |
|
| | def _set_gradient_checkpointing(self, module, value=False): |
| | if isinstance(module, HuskyEncoder): |
| | module.gradient_checkpointing = value |
| |
|
| | Husky_START_DOCSTRING = r""" |
| | This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| | library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads |
| | etc.) |
| | |
| | This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| | Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage |
| | and behavior. |
| | |
| | Parameters: |
| | config ([`HuskyConfig`]): Model configuration class with all the parameters of the model. |
| | Initializing with a config file does not load the weights associated with the model, only the |
| | configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| | """ |
| |
|
| | Husky_VISION_INPUTS_DOCSTRING = r""" |
| | Args: |
| | pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): |
| | Pixel values. Pixel values can be obtained using [`HuskyProcessor`]. See [`HuskyProcessor.__call__`] for |
| | details. |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| | tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| | more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| |
|
| | Husky_TEXT_INPUTS_DOCSTRING = r""" |
| | Args: |
| | input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): |
| | Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide |
| | it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) |
| | attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | [What are attention masks?](../glossary#attention-mask) |
| | decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| | Indices of decoder input sequence tokens in the vocabulary. |
| | |
| | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. |
| | |
| | [What are decoder input IDs?](../glossary#decoder-input-ids) |
| | |
| | T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` |
| | is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). |
| | |
| | To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 |
| | Training](./t5#training). |
| | decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| | Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also |
| | be used by default. |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| | tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| | more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| |
|
| | Husky_INPUTS_DOCSTRING = r""" |
| | Args: |
| | pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): |
| | Pixel values. Pixel values can be obtained using [`HuskyProcessor`]. See [`HuskyProcessor.__call__`] for |
| | details. |
| | |
| | input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be |
| | provided to serve as text prompt, which the language model can continue. |
| | |
| | Indices can be obtained using [`HuskyProcessor`]. See [`HuskyProcessor.__call__`] for details. |
| | |
| | [What are input IDs?](../glossary#input-ids) |
| | attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| | |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | |
| | [What are attention masks?](../glossary#attention-mask) |
| | |
| | decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| | Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an |
| | encoder-decoder language model (like T5) is used. |
| | |
| | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) |
| | |
| | decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): |
| | Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also |
| | be used by default. |
| | |
| | Only relevant in case an encoder-decoder language model (like T5) is used. |
| | |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| | tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| | more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| |
|
| | |
| | class HuskyEncoder(nn.Module): |
| | """ |
| | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a |
| | [`HuskyEncoderLayer`]. |
| | |
| | Args: |
| | config (`HuskyConfig`): |
| | The corresponding vision configuration for the `HuskyEncoder`. |
| | """ |
| |
|
| | def __init__(self, config: HuskyConfig): |
| | super().__init__() |
| | self.config = config |
| | self.layers = nn.ModuleList([HuskyEncoderLayer(config) for _ in range(config.num_hidden_layers)]) |
| | self.gradient_checkpointing = False |
| |
|
| | def forward( |
| | self, |
| | inputs_embeds, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, BaseModelOutput]: |
| | r""" |
| | Args: |
| | inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
| | Embedded representation of the inputs. Should be float, not int tokens. |
| | attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| | |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | |
| | [What are attention masks?](../glossary#attention-mask) |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
| | returned tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
| | for more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | encoder_states = () if output_hidden_states else None |
| | all_attentions = () if output_attentions else None |
| |
|
| | hidden_states = inputs_embeds |
| | for idx, encoder_layer in enumerate(self.layers): |
| | if output_hidden_states: |
| | encoder_states = encoder_states + (hidden_states,) |
| | if self.gradient_checkpointing and self.training: |
| |
|
| | def create_custom_forward(module): |
| | def custom_forward(*inputs): |
| | return module(*inputs, output_attentions) |
| |
|
| | return custom_forward |
| |
|
| | layer_outputs = torch.utils.checkpoint.checkpoint( |
| | create_custom_forward(encoder_layer), |
| | hidden_states, |
| | attention_mask, |
| | ) |
| | else: |
| | layer_outputs = encoder_layer( |
| | hidden_states, |
| | attention_mask, |
| | output_attentions=output_attentions, |
| | ) |
| |
|
| | hidden_states = layer_outputs[0] |
| |
|
| | if output_attentions: |
| | all_attentions = all_attentions + (layer_outputs[1],) |
| |
|
| | if output_hidden_states: |
| | encoder_states = encoder_states + (hidden_states,) |
| |
|
| | if not return_dict: |
| | return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) |
| | return BaseModelOutput( |
| | last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions |
| | ) |
| |
|
| | |
| | class HuskyVisionModel(HuskyPreTrainedModel): |
| | main_input_name = "pixel_values" |
| | config_class = HuskyVisionConfig |
| |
|
| | def __init__(self, config: HuskyVisionConfig): |
| | super().__init__(config) |
| | self.config = config |
| | embed_dim = config.hidden_size |
| |
|
| | self.embeddings = HuskyVisionEmbeddings(config) |
| | self.video_embeddings = HuskyVideoEmbeddings(config) |
| |
|
| | self.encoder = HuskyEncoder(config) |
| | self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) |
| |
|
| | self.post_init() |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_VISION_INPUTS_DOCSTRING) |
| | |
| | def forward( |
| | self, |
| | pixel_values: Optional[torch.FloatTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, BaseModelOutputWithPooling]: |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | if pixel_values is None: |
| | raise ValueError("You have to specify pixel_values") |
| |
|
| | if len(pixel_values.shape) == 4: |
| | hidden_states = self.embeddings(pixel_values) |
| | elif len(pixel_values.shape) == 5: |
| | hidden_states = self.video_embeddings(pixel_values) |
| | else: |
| | raise ValueError(f"wrong pixel_values size: {pixel_values.shape}") |
| |
|
| | encoder_outputs = self.encoder( |
| | inputs_embeds=hidden_states, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | last_hidden_state = encoder_outputs[0] |
| | last_hidden_state = self.post_layernorm(last_hidden_state) |
| |
|
| | pooled_output = last_hidden_state[:, 0, :] |
| | pooled_output = self.post_layernorm(pooled_output) |
| |
|
| | if not return_dict: |
| | return (last_hidden_state, pooled_output) + encoder_outputs[1:] |
| |
|
| | return BaseModelOutputWithPooling( |
| | last_hidden_state=last_hidden_state, |
| | pooler_output=pooled_output, |
| | hidden_states=encoder_outputs.hidden_states, |
| | attentions=encoder_outputs.attentions, |
| | ) |
| |
|
| | def get_input_embeddings(self): |
| | return self.embeddings |
| |
|
| | def get_video_embeddings(self): |
| | return self.video_embeddings |
| |
|
| | class HuskyQFormerMultiHeadAttention(nn.Module): |
| | def __init__(self, config, is_cross_attention=False): |
| | super().__init__() |
| | self.config = config |
| | if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): |
| | raise ValueError( |
| | "The hidden size (%d) is not a multiple of the number of attention heads (%d)" |
| | % (config.hidden_size, config.num_attention_heads) |
| | ) |
| |
|
| | self.num_attention_heads = config.num_attention_heads |
| | self.attention_head_size = int(config.hidden_size / config.num_attention_heads) |
| | self.all_head_size = self.num_attention_heads * self.attention_head_size |
| |
|
| | self.query = nn.Linear(config.hidden_size, self.all_head_size) |
| | if is_cross_attention: |
| | self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) |
| | self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) |
| | else: |
| | self.key = nn.Linear(config.hidden_size, self.all_head_size) |
| | self.value = nn.Linear(config.hidden_size, self.all_head_size) |
| |
|
| | self.dropout = nn.Dropout(config.attention_probs_dropout_prob) |
| | self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") |
| | if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": |
| | self.max_position_embeddings = config.max_position_embeddings |
| | self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) |
| | self.save_attention = False |
| |
|
| | def save_attn_gradients(self, attn_gradients): |
| | self.attn_gradients = attn_gradients |
| |
|
| | def get_attn_gradients(self): |
| | return self.attn_gradients |
| |
|
| | def save_attention_map(self, attention_map): |
| | self.attention_map = attention_map |
| |
|
| | def get_attention_map(self): |
| | return self.attention_map |
| |
|
| | def transpose_for_scores(self, x): |
| | new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) |
| | x = x.view(*new_x_shape) |
| | return x.permute(0, 2, 1, 3) |
| |
|
| | def forward( |
| | self, |
| | hidden_states, |
| | attention_mask=None, |
| | head_mask=None, |
| | encoder_hidden_states=None, |
| | encoder_attention_mask=None, |
| | past_key_value=None, |
| | output_attentions=False, |
| | ): |
        # If this is instantiated as a cross-attention module, the keys and values come from
        # the encoder; the attention mask needs to be such that the encoder's padding tokens
        # are not attended to.
| | is_cross_attention = encoder_hidden_states is not None |
| |
|
| | if is_cross_attention: |
| | key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) |
| | value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) |
| | attention_mask = encoder_attention_mask |
| | elif past_key_value is not None: |
| | key_layer = self.transpose_for_scores(self.key(hidden_states)) |
| | value_layer = self.transpose_for_scores(self.value(hidden_states)) |
| | key_layer = torch.cat([past_key_value[0], key_layer], dim=2) |
| | value_layer = torch.cat([past_key_value[1], value_layer], dim=2) |
| | else: |
| | key_layer = self.transpose_for_scores(self.key(hidden_states)) |
| | value_layer = self.transpose_for_scores(self.value(hidden_states)) |
| |
|
| | mixed_query_layer = self.query(hidden_states) |
| |
|
| | query_layer = self.transpose_for_scores(mixed_query_layer) |
| |
|
| | past_key_value = (key_layer, value_layer) |
| |
|
        # Take the dot product between "query" and "key" to get the raw attention scores.
| | attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) |
| |
|
| | if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": |
| | seq_length = hidden_states.size()[1] |
| | position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) |
| | position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) |
| | distance = position_ids_l - position_ids_r |
| | positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) |
| | positional_embedding = positional_embedding.to(dtype=query_layer.dtype) |
| |
|
| | if self.position_embedding_type == "relative_key": |
| | relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) |
| | attention_scores = attention_scores + relative_position_scores |
| | elif self.position_embedding_type == "relative_key_query": |
| | relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) |
| | relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) |
| | attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key |
| |
|
| | attention_scores = attention_scores / math.sqrt(self.attention_head_size) |
| |
|
| | if attention_mask is not None: |
            # Apply the attention mask (precomputed for all layers in the forward pass).
| | attention_scores = attention_scores + attention_mask |
| |
|
        # Normalize the attention scores to probabilities.
| | attention_probs = nn.Softmax(dim=-1)(attention_scores) |
| |
|
| | if is_cross_attention and self.save_attention: |
| | self.save_attention_map(attention_probs) |
| | attention_probs.register_hook(self.save_attn_gradients) |
| |
|
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
| | attention_probs_dropped = self.dropout(attention_probs) |
| |
|
        # Mask heads if we want to.
| | if head_mask is not None: |
| | attention_probs_dropped = attention_probs_dropped * head_mask |
| |
|
| | context_layer = torch.matmul(attention_probs_dropped, value_layer) |
| |
|
| | context_layer = context_layer.permute(0, 2, 1, 3).contiguous() |
| | new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) |
| | context_layer = context_layer.view(*new_context_layer_shape) |
| |
|
| | outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) |
| |
|
| | outputs = outputs + (past_key_value,) |
| | return outputs |
| |
|
| | |
| | class HuskyQFormerSelfOutput(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
| | self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| | self.dropout = nn.Dropout(config.hidden_dropout_prob) |
| |
|
| | def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: |
| | hidden_states = self.dense(hidden_states) |
| | hidden_states = self.dropout(hidden_states) |
| | hidden_states = self.LayerNorm(hidden_states + input_tensor) |
| | return hidden_states |
| |
|
| | class HuskyQFormerAttention(nn.Module): |
| | def __init__(self, config, is_cross_attention=False): |
| | super().__init__() |
| | self.attention = HuskyQFormerMultiHeadAttention(config, is_cross_attention) |
| | self.output = HuskyQFormerSelfOutput(config) |
| | self.pruned_heads = set() |
| |
|
| | def prune_heads(self, heads): |
| | if len(heads) == 0: |
| | return |
| | heads, index = find_pruneable_heads_and_indices( |
| | heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads |
| | ) |
| |
|
        # Prune the linear layers.
| | self.attention.query = prune_linear_layer(self.attention.query, index) |
| | self.attention.key = prune_linear_layer(self.attention.key, index) |
| | self.attention.value = prune_linear_layer(self.attention.value, index) |
| | self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) |
| |
|
        # Update the hyper-parameters and store the pruned heads.
| | self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) |
| | self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads |
| | self.pruned_heads = self.pruned_heads.union(heads) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: torch.Tensor, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.FloatTensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[torch.Tensor]: |
| | self_outputs = self.attention( |
| | hidden_states, |
| | attention_mask, |
| | head_mask, |
| | encoder_hidden_states, |
| | encoder_attention_mask, |
| | past_key_value, |
| | output_attentions, |
| | ) |
| | attention_output = self.output(self_outputs[0], hidden_states) |
| | outputs = (attention_output,) + self_outputs[1:] |
| | return outputs |
| |
|
| | |
| | class HuskyQFormerIntermediate(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.dense = nn.Linear(config.hidden_size, config.intermediate_size) |
| | if isinstance(config.hidden_act, str): |
| | self.intermediate_act_fn = ACT2FN[config.hidden_act] |
| | else: |
| | self.intermediate_act_fn = config.hidden_act |
| |
|
| | def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| | hidden_states = self.dense(hidden_states) |
| | hidden_states = self.intermediate_act_fn(hidden_states) |
| | return hidden_states |
| |
|
| | |
| | class HuskyQFormerOutput(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.dense = nn.Linear(config.intermediate_size, config.hidden_size) |
| | self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| | self.dropout = nn.Dropout(config.hidden_dropout_prob) |
| |
|
| | def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: |
| | hidden_states = self.dense(hidden_states) |
| | hidden_states = self.dropout(hidden_states) |
| | hidden_states = self.LayerNorm(hidden_states + input_tensor) |
| | return hidden_states |
| |
|
| | class HuskyQFormerLayer(nn.Module): |
| | def __init__(self, config, layer_idx): |
| | super().__init__() |
| | self.chunk_size_feed_forward = config.chunk_size_feed_forward |
| | self.seq_len_dim = 1 |
| | self.attention = HuskyQFormerAttention(config) |
| |
|
| | self.layer_idx = layer_idx |
| |
|
| | if layer_idx % config.cross_attention_frequency == 0: |
| | self.crossattention = HuskyQFormerAttention(config, is_cross_attention=True) |
| | self.has_cross_attention = True |
| | else: |
| | self.has_cross_attention = False |
| |
|
| | self.intermediate_query = HuskyQFormerIntermediate(config) |
| | self.output_query = HuskyQFormerOutput(config) |
| |
|
| | def forward( |
| | self, |
| | hidden_states, |
| | attention_mask=None, |
| | head_mask=None, |
| | encoder_hidden_states=None, |
| | encoder_attention_mask=None, |
| | past_key_value=None, |
| | output_attentions=False, |
| | query_length=0, |
| | ): |
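        # Layout note: `hidden_states` holds the learned query tokens first (`query_length`
        # of them), optionally followed by text tokens. Only the query part cross-attends to
        # `encoder_hidden_states`, and the two parts go through separate feed-forward
        # branches below before being concatenated back together.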
        # the cached self-attention key/values are the first two entries of past_key_value
| | self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None |
| | self_attention_outputs = self.attention( |
| | hidden_states, |
| | attention_mask, |
| | head_mask, |
| | output_attentions=output_attentions, |
| | past_key_value=self_attn_past_key_value, |
| | ) |
| | attention_output = self_attention_outputs[0] |
| | outputs = self_attention_outputs[1:-1] |
| |
|
| | present_key_value = self_attention_outputs[-1] |
| |
|
| | if query_length > 0: |
| | query_attention_output = attention_output[:, :query_length, :] |
| |
|
| | if self.has_cross_attention: |
| | if encoder_hidden_states is None: |
| | raise ValueError("encoder_hidden_states must be given for cross-attention layers") |
| | cross_attention_outputs = self.crossattention( |
| | query_attention_output, |
| | attention_mask, |
| | head_mask, |
| | encoder_hidden_states, |
| | encoder_attention_mask, |
| | output_attentions=output_attentions, |
| | ) |
| | query_attention_output = cross_attention_outputs[0] |
                # add the cross-attention weights if we output attention weights
| | outputs = outputs + cross_attention_outputs[1:-1] |
| |
|
| | layer_output = apply_chunking_to_forward( |
| | self.feed_forward_chunk_query, |
| | self.chunk_size_feed_forward, |
| | self.seq_len_dim, |
| | query_attention_output, |
| | ) |
| |
|
| | if attention_output.shape[1] > query_length: |
| | layer_output_text = apply_chunking_to_forward( |
| | self.feed_forward_chunk, |
| | self.chunk_size_feed_forward, |
| | self.seq_len_dim, |
| | attention_output[:, query_length:, :], |
| | ) |
| | layer_output = torch.cat([layer_output, layer_output_text], dim=1) |
| | else: |
| | layer_output = apply_chunking_to_forward( |
| | self.feed_forward_chunk, |
| | self.chunk_size_feed_forward, |
| | self.seq_len_dim, |
| | attention_output, |
| | ) |
| | outputs = (layer_output,) + outputs |
| |
|
| | outputs = outputs + (present_key_value,) |
| |
|
| | return outputs |
| |
|
| | def feed_forward_chunk(self, attention_output): |
| | intermediate_output = self.intermediate(attention_output) |
| | layer_output = self.output(intermediate_output, attention_output) |
| | return layer_output |
| |
|
| | def feed_forward_chunk_query(self, attention_output): |
| | intermediate_output = self.intermediate_query(attention_output) |
| | layer_output = self.output_query(intermediate_output, attention_output) |
| | return layer_output |
| |
|
| | class HuskyQFormerEncoder(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.layer = nn.ModuleList( |
| | [HuskyQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] |
| | ) |
| | self.gradient_checkpointing = False |
| |
|
| | def forward( |
| | self, |
| | hidden_states, |
| | attention_mask=None, |
| | head_mask=None, |
| | encoder_hidden_states=None, |
| | encoder_attention_mask=None, |
| | past_key_values=None, |
| | use_cache=None, |
| | output_attentions=False, |
| | output_hidden_states=False, |
| | return_dict=True, |
| | query_length=0, |
| | ): |
| | all_hidden_states = () if output_hidden_states else None |
| | all_self_attentions = () if output_attentions else None |
| | all_cross_attentions = () if output_attentions else None |
| |
|
| | next_decoder_cache = () if use_cache else None |
| |
|
| | for i in range(self.config.num_hidden_layers): |
| | layer_module = self.layer[i] |
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | layer_head_mask = head_mask[i] if head_mask is not None else None |
| | past_key_value = past_key_values[i] if past_key_values is not None else None |
| |
|
| | if getattr(self.config, "gradient_checkpointing", False) and self.training: |
| | if use_cache: |
                    logger.warning(
| | "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| | ) |
| | use_cache = False |
| |
|
| | def create_custom_forward(module): |
| | def custom_forward(*inputs): |
| | return module(*inputs, past_key_value, output_attentions, query_length) |
| |
|
| | return custom_forward |
| |
|
| | layer_outputs = torch.utils.checkpoint.checkpoint( |
| | create_custom_forward(layer_module), |
| | hidden_states, |
| | attention_mask, |
| | layer_head_mask, |
| | encoder_hidden_states, |
| | encoder_attention_mask, |
| | ) |
| | else: |
| | layer_outputs = layer_module( |
| | hidden_states, |
| | attention_mask, |
| | layer_head_mask, |
| | encoder_hidden_states, |
| | encoder_attention_mask, |
| | past_key_value, |
| | output_attentions, |
| | query_length, |
| | ) |
| |
|
| | hidden_states = layer_outputs[0] |
| | if use_cache: |
| | next_decoder_cache += (layer_outputs[-1],) |
| | if output_attentions: |
| | all_self_attentions = all_self_attentions + (layer_outputs[1],) |
| | if layer_module.has_cross_attention: |
| | all_cross_attentions = all_cross_attentions + (layer_outputs[2],) |
| |
|
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | if not return_dict: |
| | return tuple( |
| | v |
| | for v in [ |
| | hidden_states, |
| | next_decoder_cache, |
| | all_hidden_states, |
| | all_self_attentions, |
| | all_cross_attentions, |
| | ] |
| | if v is not None |
| | ) |
| | return BaseModelOutputWithPastAndCrossAttentions( |
| | last_hidden_state=hidden_states, |
| | past_key_values=next_decoder_cache, |
| | hidden_states=all_hidden_states, |
| | attentions=all_self_attentions, |
| | cross_attentions=all_cross_attentions, |
| | ) |
| |
|
| | class HuskyQFormerModel(HuskyPreTrainedModel): |
| | """ |
| | Querying Transformer (Q-Former), used in Husky. |
| | """ |
| |
|
| | def __init__(self, config: HuskyQFormerConfig): |
| | super().__init__(config) |
| | self.config = config |
| |
|
| | self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
| | self.dropout = nn.Dropout(config.hidden_dropout_prob) |
| |
|
| | self.encoder = HuskyQFormerEncoder(config) |
| |
|
| | self.post_init() |
| |
|
| | def get_input_embeddings(self): |
| | return self.embeddings.word_embeddings |
| |
|
| | def set_input_embeddings(self, value): |
| | self.embeddings.word_embeddings = value |
| |
|
| | def _prune_heads(self, heads_to_prune): |
| | """ |
| | Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base |
| | class PreTrainedModel |
| | """ |
| | for layer, heads in heads_to_prune.items(): |
| | self.encoder.layer[layer].attention.prune_heads(heads) |
| |
|
| | def get_extended_attention_mask( |
| | self, |
| | attention_mask: torch.Tensor, |
| | input_shape: Tuple[int], |
| | device: torch.device, |
| | has_query: bool = False, |
| | ) -> torch.Tensor: |
| | """ |
| | Makes broadcastable attention and causal masks so that future and masked tokens are ignored. |
| | |
| | Arguments: |
| | attention_mask (`torch.Tensor`): |
| | Mask with ones indicating tokens to attend to, zeros for tokens to ignore. |
| | input_shape (`Tuple[int]`): |
| | The shape of the input to the model. |
| | device (`torch.device`): |
| | The device of the input to the model. |
| | |
| | Returns: |
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
| | """ |
        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length] ourselves, in which case we just need
        # to make it broadcastable to all heads.
| | if attention_mask.dim() == 3: |
| | extended_attention_mask = attention_mask[:, None, :, :] |
| | elif attention_mask.dim() == 2: |
            # Provided a padding mask of dimensions [batch_size, seq_length];
            # make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
| | extended_attention_mask = attention_mask[:, None, None, :] |
| | else: |
| | raise ValueError( |
| | "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( |
| | input_shape, attention_mask.shape |
| | ) |
| | ) |
| |
|
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
| | extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) |
| | extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 |
| | return extended_attention_mask |
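    # Minimal worked example (hypothetical values): a padding mask [[1, 1, 0]] of shape
    # (batch_size, seq_len) becomes (batch_size, 1, 1, seq_len) and is mapped through
    # (1.0 - mask) * -10000.0, giving [[[[0.0, 0.0, -10000.0]]]]; added to the raw attention
    # scores, the padded position collapses to ~zero probability after the softmax.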
| |
|
| | def forward( |
| | self, |
| | query_embeds, |
| | attention_mask=None, |
| | head_mask=None, |
| | encoder_hidden_states=None, |
| | encoder_attention_mask=None, |
| | past_key_values=None, |
| | use_cache=None, |
| | output_attentions=None, |
| | output_hidden_states=None, |
| | return_dict=None, |
| | ): |
| | r""" |
| | encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`): |
| | Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if |
| | the model is configured as a decoder. |
| | encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`): |
| | Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in |
| | the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of: |
| | shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and |
| | value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are |
| | used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key |
| | value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape |
| | `(batch_size, sequence_length)`. |
| | use_cache (`bool`, `optional`): |
| | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| | `past_key_values`). |
| | """ |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
        # length of the cached sequence, excluding the query tokens
| | past_key_values_length = ( |
| | past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 |
| | ) |
| |
|
| | query_length = query_embeds.shape[1] if query_embeds is not None else 0 |
| |
|
| | embedding_output = self.layernorm(query_embeds) |
| | embedding_output = self.dropout(embedding_output) |
| |
|
| | input_shape = embedding_output.size()[:-1] |
| | batch_size, seq_length = input_shape |
| | device = embedding_output.device |
| |
|
| | if attention_mask is None: |
| | attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) |
| |
|
        # Make the self-attention mask broadcastable to all heads:
        # [batch_size, seq_length] -> [batch_size, 1, 1, seq_length].
| | extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) |
| |
|
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
| | if encoder_hidden_states is not None: |
| | if type(encoder_hidden_states) == list: |
| | encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() |
| | else: |
| | ( |
| | encoder_batch_size, |
| | encoder_sequence_length, |
| | _, |
| | ) = encoder_hidden_states.size() |
| | encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
| |
|
| | if type(encoder_attention_mask) == list: |
| | encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] |
| | elif encoder_attention_mask is None: |
| | encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
| | encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) |
| | else: |
| | encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) |
| | else: |
| | encoder_extended_attention_mask = None |
| |
|
        # Prepare the head mask if needed: 1.0 in head_mask indicates the head is kept.
        # The input head_mask has shape [num_heads] or [num_hidden_layers, num_heads] and is
        # expanded so that it can be applied per layer to attention_probs of shape
        # [batch_size, num_heads, seq_length, seq_length].
| | head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) |
| |
|
| | encoder_outputs = self.encoder( |
| | embedding_output, |
| | attention_mask=extended_attention_mask, |
| | head_mask=head_mask, |
| | encoder_hidden_states=encoder_hidden_states, |
| | encoder_attention_mask=encoder_extended_attention_mask, |
| | past_key_values=past_key_values, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | query_length=query_length, |
| | ) |
| | sequence_output = encoder_outputs[0] |
| | pooled_output = sequence_output[:, 0, :] |
| |
|
| | if not return_dict: |
| | return (sequence_output, pooled_output) + encoder_outputs[1:] |
| |
|
| | return BaseModelOutputWithPoolingAndCrossAttentions( |
| | last_hidden_state=sequence_output, |
| | pooler_output=pooled_output, |
| | past_key_values=encoder_outputs.past_key_values, |
| | hidden_states=encoder_outputs.hidden_states, |
| | attentions=encoder_outputs.attentions, |
| | cross_attentions=encoder_outputs.cross_attentions, |
| | ) |
| |
|
| | class AdapterMLP(nn.Module): |
| | def __init__(self, config): |
| | super().__init__() |
| | self.config = config |
| | self.activation_fn = ACT2FN["silu"] |
| | hidden_size = config.vision_config.hidden_size |
| | intermediate_size = hidden_size // 4 |
| | output_size = config.qformer_config.hidden_size |
| |
|
| | self.fc1 = nn.Linear(hidden_size, intermediate_size) |
| | self.fc2 = nn.Linear(intermediate_size, output_size) |
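        # Bottleneck adapter: vision hidden_size -> hidden_size // 4 -> Q-Former hidden_size,
        # with a SiLU non-linearity applied in between (see forward below).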
| |
|
| | |
| | |
| | |
| | |
| |
|
| | def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
| | hidden_states = self.fc1(hidden_states) |
| | hidden_states = self.activation_fn(hidden_states) |
| | hidden_states = self.fc2(hidden_states) |
| | return hidden_states |
| |
|
| | @add_start_docstrings( |
| | """ |
| | Husky Model for generating text and image features. The model consists of a vision encoder, Querying Transformer |
| | (Q-Former) and a language model. |
| | """, |
| | Husky_START_DOCSTRING, |
| | ) |
| | class HuskyModel(HuskyPreTrainedModel): |
| | config_class = HuskyConfig |
| | main_input_name = "pixel_values" |
| |
|
| | def __init__(self, config: HuskyConfig): |
| | super().__init__(config) |
| |
|
| | self.vision_model = HuskyVisionModel(config.vision_config) |
| |
|
| | self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) |
| | self.qformer = HuskyQFormerModel(config.qformer_config) |
| |
|
| | self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) |
| | self.language_model = AutoModelForCausalLM.from_config(config.text_config) |
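        # Bridging scheme: `num_query_tokens` learned query embeddings are refined by the
        # Q-Former via cross-attention over the image features and then mapped by
        # `language_projection` into the language model's embedding space, where they are
        # spliced into the prompt (see `forward`).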
| |
|
| | self.config.hidden_size = config.text_config.hidden_size |
| | self.num_queries = config.num_query_tokens |
| | self.offset = 5 |
| |
|
        # Initialize weights and apply final processing
| | self.post_init() |
| |
|
| | def get_input_embeddings(self): |
| | return self.language_model.get_input_embeddings() |
| |
|
| | def set_input_embeddings(self, value): |
| | self.language_model.set_input_embeddings(value) |
| |
|
| | def set_output_embeddings(self, new_embeddings): |
| | self.language_model.set_output_embeddings(new_embeddings) |
| |
|
| | def get_output_embeddings(self) -> nn.Module: |
| | return self.language_model.get_output_embeddings() |
| |
|
| | def get_encoder(self): |
| | return self.language_model.get_encoder() |
| |
|
| | def get_decoder(self): |
| | return self.language_model.get_decoder() |
| |
|
| | def _tie_weights(self): |
| | if not self.config.use_decoder_only_language_model: |
| | self.language_model.encoder.embed_tokens = self.language_model.shared |
| | self.language_model.decoder.embed_tokens = self.language_model.shared |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_TEXT_INPUTS_DOCSTRING) |
| | def get_text_features( |
| | self, |
| | input_ids: Optional[torch.Tensor] = None, |
| | attention_mask: Optional[torch.Tensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ): |
| | r""" |
| | Returns: |
| | text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`): |
| | The language model outputs. If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that |
| | contains the language model logits, the past key values and the hidden states if |
| | `output_hidden_states=True`. |
| | ```""" |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | text_outputs = self.language_model( |
| | input_ids=input_ids, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | return text_outputs |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_VISION_INPUTS_DOCSTRING) |
| | def get_image_features( |
| | self, |
| | pixel_values: Optional[torch.FloatTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ): |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | return vision_outputs |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_INPUTS_DOCSTRING) |
| | def get_qformer_features( |
| | self, |
| | pixel_values: Optional[torch.FloatTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ): |
| | r""" |
| | Returns: |
| | vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`): |
| | The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that |
| | contains the image features, the pooled image features and the hidden states if |
| | `output_hidden_states=True`. |
| | """ |
| | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| | output_hidden_states = ( |
| | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| | ) |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | image_embeds = vision_outputs[0] |
| |
|
        # the query tokens attend to every image token during cross-attention
| | image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) |
| |
|
| | query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) |
| | query_outputs = self.qformer( |
| | query_embeds=query_tokens, |
| | encoder_hidden_states=image_embeds, |
| | encoder_attention_mask=image_attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | return query_outputs |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_INPUTS_DOCSTRING) |
| | |
| | def forward( |
| | self, |
| | pixel_values: torch.FloatTensor, |
| | input_ids: torch.FloatTensor, |
| | attention_mask: Optional[torch.LongTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, HuskyForConditionalGenerationModelOutput]: |
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
        # step 1: forward the images (or videos) through the vision encoder to get
        # embeddings of shape (batch_size, seq_len, hidden_size)
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | image_embeds = vision_outputs[0] |
| |
|
        # step 2: forward the query tokens through the Q-Former, using the image embeddings for cross-attention
| | image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) |
| |
|
| | query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) |
| | query_outputs = self.qformer( |
| | query_embeds=query_tokens, |
| | encoder_hidden_states=image_embeds, |
| | encoder_attention_mask=image_attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | query_output = query_outputs[0] |
| |
|
        # step 3: project the Q-Former output into the language model's embedding space
| | language_model_inputs = self.language_projection(query_output) |
| | assert language_model_inputs.shape[1] == self.num_queries |
| |
|
| | inputs_embeds = self.language_model.get_input_embeddings()(input_ids) |
| | |
| | |
| |
|
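| | # Overwrite the placeholder token embeddings at positions [offset, offset + num_queries) with the projected |
| | # visual query embeddings. |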
| | inputs_embeds[:, self.offset:self.offset + self.num_queries, :] = language_model_inputs |
| | if attention_mask is None: |
| | attention_mask = torch.ones_like( |
| | input_ids, dtype=torch.long, device=language_model_inputs.device) |
| |
|
| | outputs = self.language_model( |
| | inputs_embeds=inputs_embeds, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | logits = outputs.logits if return_dict else outputs[0] |
| | loss = None |
| | |
| | if labels is not None: |
| | labels = labels.to(logits.device) |
| | logits = logits[:, -labels.size(1):, :] |
| | |
| | shift_logits = logits[..., :-1, :].contiguous() |
| | shift_labels = labels[..., 1:].contiguous().to(logits.device) |
| |
|
| | |
| | loss_fct = CrossEntropyLoss(reduction="mean") |
| |
|
| | loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) |
| |
|
| | if not return_dict: |
| | output = (logits, vision_outputs, query_outputs, outputs) |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return HuskyForConditionalGenerationModelOutput( |
| | loss=loss, |
| | logits=logits, |
| | vision_outputs=vision_outputs, |
| | qformer_outputs=query_outputs, |
| | language_model_outputs=outputs, |
| | ) |
| |
|
| | @add_start_docstrings( |
| | """ |
| | Husky Model for generating text given an image and an optional text prompt. The model consists of a vision |
| | encoder, Querying Transformer (Q-Former) and a language model. |
| | |
| | One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue |
| | the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token. |
| | """, |
| | Husky_START_DOCSTRING, |
| | ) |
| | class HuskyForConditionalGeneration(HuskyPreTrainedModel): |
| | config_class = HuskyConfig |
| | main_input_name = "pixel_values" |
| |
|
| | def __init__(self, config: HuskyConfig): |
| | super().__init__(config) |
| |
|
| | self.vision_model = HuskyVisionModel(config.vision_config) |
| | self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) |
| | self.qformer = HuskyQFormerModel(config.qformer_config) |
| |
|
| | self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) |
| | self.language_model = AutoModelForCausalLM.from_config(config.text_config) |
| |
|
| | self.config.hidden_size = config.text_config.hidden_size |
| | self.num_queries = config.num_query_tokens |
| | self.offset = 5  # position in the tokenized prompt at which the visual tokens are inserted |
| |
|
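| | # Adapter MLP and four LayerNorms used to fold pooled first-token ([CLS]) features from evenly spaced |
| | # vision-encoder depths into the visual prompt. |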
| | self.vision_adapter = AdapterMLP(config) |
| | self.layer_norms = nn.ModuleList() |
| | for i in range(4): |
| | self.layer_norms.append( |
| | nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps) |
| | ) |
| |
|
| | |
| | self.post_init() |
| |
|
| | def get_input_embeddings(self): |
| | return self.language_model.get_input_embeddings() |
| |
|
| | def set_input_embeddings(self, value): |
| | self.language_model.set_input_embeddings(value) |
| |
|
| | def set_output_embeddings(self, new_embeddings): |
| | self.language_model.set_output_embeddings(new_embeddings) |
| |
|
| | def get_output_embeddings(self) -> nn.Module: |
| | return self.language_model.get_output_embeddings() |
| |
|
| | def get_encoder(self): |
| | return self.language_model.get_encoder() |
| |
|
| | def get_decoder(self): |
| | return self.language_model.get_decoder() |
| |
|
| | def extract_feature( |
| | self, |
| | pixel_values: torch.FloatTensor, |
| | ): |
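| | r""" |
| | Computes the visual embeddings that are spliced into the language-model prompt: the ViT features are read |
| | out by the learned query tokens through the Q-Former, concatenated with four layer-normalized, |
| | adapter-projected [CLS] features taken from evenly spaced encoder depths, and projected to the language |
| | model's hidden size. |
| | """ |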
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_hidden_states=True, |
| | ) |
| | image_embeds = vision_outputs[0] |
| |
|
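| | # Take the [CLS] token of four evenly spaced encoder hidden states, normalize each with its own LayerNorm, |
| | # and map them through the adapter MLP so they can be appended to the Q-Former query outputs. |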
| | depth = len(vision_outputs[2]) |
| | indices = range(depth // 4 - 1, depth, depth // 4) |
| | pooled_outputs = [] |
| | for idx, layer_norm in zip(indices, self.layer_norms): |
| | pool_output = vision_outputs[2][idx][:, 0, :].unsqueeze(1) |
| | pool_output = layer_norm(pool_output) |
| | pooled_outputs.append(pool_output) |
| |
|
| | pooled_outputs = torch.cat(pooled_outputs, dim=1) |
| | pooled_outputs = self.vision_adapter(pooled_outputs) |
| |
|
| | |
| | image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) |
| |
|
| | query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) |
| | query_outputs = self.qformer( |
| | query_embeds=query_tokens, |
| | encoder_hidden_states=image_embeds, |
| | encoder_attention_mask=image_attention_mask |
| | ) |
| | query_output = query_outputs[0] |
| | query_output = torch.cat([query_output, pooled_outputs], dim=1) |
| | language_model_inputs = self.language_projection(query_output) |
| |
|
| | return language_model_inputs |
| |
|
| | def _tie_weights(self): |
| | if not self.config.use_decoder_only_language_model: |
| | self.language_model.encoder.embed_tokens = self.language_model.shared |
| | self.language_model.decoder.embed_tokens = self.language_model.shared |
| |
|
| | def _preprocess_accelerate(self): |
| | r""" |
| | Some pre-processing hacks to make the model `accelerate` compatible. Check |
| | https://github.com/huggingface/transformers/pull/21707 for more details. |
| | """ |
| | hf_device_map = self.hf_device_map |
| |
|
| | if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: |
| | |
| | logger.warning( |
| | "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" |
| | " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." |
| | " Please pass a `device_map` that contains `language_model` to remove this warning." |
| | " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for" |
| | " more details on creating a `device_map` for large models.", |
| | ) |
| |
|
| | if hasattr(self.language_model, "_hf_hook"): |
| | self.language_model._hf_hook.io_same_device = True |
| |
|
| | @add_start_docstrings_to_model_forward(Husky_INPUTS_DOCSTRING) |
| | |
| | def forward( |
| | self, |
| | pixel_values: torch.FloatTensor, |
| | input_ids: torch.LongTensor, |
| | attention_mask: Optional[torch.LongTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, HuskyForConditionalGenerationModelOutput]: |
| |
|
| | return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
| |
|
| | |
| | |
| | batch_size = input_ids.shape[0] |
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_attentions=output_attentions, |
| | output_hidden_states=True, |
| | return_dict=return_dict, |
| | ) |
| | image_embeds = vision_outputs[0] |
| |
|
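| | # Pool the [CLS] token from four evenly spaced encoder depths and adapt them, mirroring `extract_feature`. |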
| | depth = len(vision_outputs[2]) |
| | indices = range(depth // 4 - 1, depth, depth // 4) |
| | pooled_outputs = [] |
| | for idx, layer_norm in zip(indices, self.layer_norms): |
| | pool_output = vision_outputs[2][idx][:, 0, :].unsqueeze(1) |
| | pool_output = layer_norm(pool_output) |
| | pooled_outputs.append(pool_output) |
| |
|
| | pooled_outputs = torch.cat(pooled_outputs, dim=1) |
| | pooled_outputs = self.vision_adapter(pooled_outputs) |
| |
|
| | |
| | image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) |
| |
|
| | query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) |
| | query_outputs = self.qformer( |
| | query_embeds=query_tokens, |
| | encoder_hidden_states=image_embeds, |
| | encoder_attention_mask=image_attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | query_output = query_outputs[0] |
| | query_output = torch.cat([query_output, pooled_outputs], dim=1) |
| |
|
| | |
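| | # Project the concatenated query tokens and pooled features to the language model's hidden size. |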
| | language_model_inputs = self.language_projection(query_output) |
| | inputs_embeds = self.language_model.get_input_embeddings()(input_ids) |
| | |
| | |
| |
|
| | |
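| | # Splice the visual tokens into the prompt: keep the first `offset` text embeddings, insert the projected |
| | # visual embeddings, then append the remaining text embeddings. |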
| | prefix_embeds = inputs_embeds[:, :self.offset, :] |
| | postfix_embeds = inputs_embeds[:, self.offset:, :] |
| | inputs_embeds = torch.cat([prefix_embeds, language_model_inputs, postfix_embeds], dim=1) |
| |
|
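| | # The attention mask must match the spliced sequence: prompt prefix, `num_queries + 4` visual tokens |
| | # (query outputs plus the four pooled features), then the rest of the prompt. |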
| | if attention_mask is None: |
| | attention_mask = torch.ones( |
| | inputs_embeds.size()[:-1], dtype=torch.long, device=language_model_inputs.device) |
| | else: |
| | prefix_mask = attention_mask[:, :self.offset] |
| | postfix_mask = attention_mask[:, self.offset:] |
| | vision_mask = torch.ones(size=(batch_size, self.num_queries + 4), dtype=torch.long, |
| | device=attention_mask.device) |
| | attention_mask = torch.cat([prefix_mask, vision_mask, postfix_mask], dim=-1) |
| |
|
| | outputs = self.language_model( |
| | inputs_embeds=inputs_embeds, |
| | attention_mask=attention_mask, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | logits = outputs.logits if return_dict else outputs[0] |
| | loss = None |
| | |
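| | # Standard shift-by-one language modeling loss, computed only over the trailing span covered by `labels`. |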
| | if labels is not None: |
| | labels = labels.to(logits.device) |
| | logits = logits[:, -labels.size(1):, :] |
| | |
| | shift_logits = logits[..., :-1, :].contiguous() |
| | shift_labels = labels[..., 1:].contiguous().to(logits.device) |
| |
|
| | |
| | loss_fct = CrossEntropyLoss(reduction="mean") |
| |
|
| | loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) |
| |
|
| | if not return_dict: |
| | output = (logits, vision_outputs, query_outputs, outputs) |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return HuskyForConditionalGenerationModelOutput( |
| | loss=loss, |
| | logits=logits, |
| | vision_outputs=vision_outputs, |
| | qformer_outputs=query_outputs, |
| | language_model_outputs=outputs, |
| | ) |
| |
|
| | @torch.no_grad() |
| | def generate( |
| | self, |
| | pixel_values: Optional[torch.FloatTensor] = None, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | attention_mask: Optional[torch.LongTensor] = None, |
| | language_model_inputs: Optional[torch.FloatTensor] = None, |
| | generation_config: Optional[GenerationConfig] = None, |
| | **generate_kwargs, |
| | ) -> torch.LongTensor: |
| | """ |
| | Overrides `generate` function to be able to use the model as a conditional generator. |
| | |
| | Args: |
| | pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): |
| | Input images to be processed. |
| | input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): |
| | The sequence used as a prompt for the generation. |
| | attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): |
| | Mask to avoid performing attention on padding token indices. |
| | language_model_inputs (`torch.FloatTensor` of shape (batch_size, sequence_length, hidden_size), *optional*): |
| | Pre-computed visual embeddings to splice into the prompt. If not provided, they are computed from |
| | `pixel_values`. |
| | generation_config (`~generation.GenerationConfig`, *optional*): |
| | The generation configuration to be used as base parametrization for the generation call. `**kwargs` |
| | passed to generate matching the attributes of `generation_config` will override them. If |
| | `generation_config` is not provided, the default will be used, which has the following loading |
| | priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model |
| | configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s |
| | default values, whose documentation should be checked to parameterize generation. |
| | |
| | Returns: |
| | generated_ids (`torch.LongTensor` of shape (batch_size, generated_sequence_length)): |
| | The token ids produced by the underlying language model's `generate` call; decode them with the |
| | tokenizer to obtain the generated text. |
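| |
| | Example (a minimal usage sketch, not taken from the original file: it assumes `pixel_values`, `input_ids` and |
| | `attention_mask` have already been produced by an external image processor and tokenizer, and that `model` and |
| | `tokenizer` are placeholder names for the loaded `HuskyForConditionalGeneration` and its text tokenizer): |
| |
| | ```python |
| | >>> generated_ids = model.generate( |
| | ...     pixel_values=pixel_values, |
| | ...     input_ids=input_ids, |
| | ...     attention_mask=attention_mask, |
| | ...     max_new_tokens=64, |
| | ... ) |
| | >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) |
| | ``` |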
| | """ |
| | if hasattr(self, "hf_device_map"): |
| | |
| | self._preprocess_accelerate() |
| |
|
| | if language_model_inputs is None: |
| | vision_outputs = self.vision_model( |
| | pixel_values=pixel_values, |
| | output_hidden_states=True, |
| | ) |
| | image_embeds = vision_outputs[0] |
| |
|
| | depth = len(vision_outputs[2]) |
| | indices = range(depth // 4 - 1, depth, depth // 4) |
| | pooled_outputs = [] |
| | for idx, layer_norm in zip(indices, self.layer_norms): |
| | pool_output = vision_outputs[2][idx][:, 0, :].unsqueeze(1) |
| | pool_output = layer_norm(pool_output) |
| | pooled_outputs.append(pool_output) |
| |
|
| | pooled_outputs = torch.cat(pooled_outputs, dim=1) |
| | pooled_outputs = self.vision_adapter(pooled_outputs) |
| |
|
| | image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) |
| |
|
| | query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) |
| | query_outputs = self.qformer( |
| | query_embeds=query_tokens, |
| | encoder_hidden_states=image_embeds, |
| | encoder_attention_mask=image_attention_mask, |
| | ) |
| | query_output = query_outputs[0] |
| | query_output = torch.cat([query_output, pooled_outputs], dim=1) |
| |
|
| | language_model_inputs = self.language_projection(query_output) |
| |
|
| | batch_size = language_model_inputs.shape[0] |
| | if input_ids is None: |
| | input_ids = ( |
| | torch.LongTensor([[self.config.text_config.bos_token_id]]) |
| | .repeat(batch_size, 1) |
| | .to(language_model_inputs.device) |
| | ) |
| |
|
| | inputs_embeds = self.language_model.get_input_embeddings()(input_ids) |
| |
|
| | prefix_embeds = inputs_embeds[:, :self.offset, :] |
| | postfix_embeds = inputs_embeds[:, self.offset:, :] |
| | inputs_embeds = torch.cat([prefix_embeds, language_model_inputs, postfix_embeds], dim=1) |
| |
|
| | if attention_mask is None: |
| | attention_mask = torch.ones( |
| | inputs_embeds.size()[:-1], dtype=torch.long, device=language_model_inputs.device) |
| | else: |
| | prefix_mask = attention_mask[:, :self.offset] |
| | postfix_mask = attention_mask[:, self.offset:] |
| | vision_mask = torch.ones(size=(batch_size, self.num_queries + 4), dtype=torch.long, |
| | device=attention_mask.device) |
| | attention_mask = torch.cat([prefix_mask, vision_mask, postfix_mask], dim=-1) |
| |
|
| | outputs = self.language_model.generate( |
| | inputs_embeds=inputs_embeds, |
| | attention_mask=attention_mask, |
| | generation_config=generation_config, |
| | **generate_kwargs, |
| | ) |
| |
|
| | return outputs |
| |
|