# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

import math
import os
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
from transformers.pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

from m4.models import DecoupledEmbedding, DecoupledLinear
from m4.models.common import (
    expand_inputs_for_generation,
    prepare_inputs_for_generation,
    update_model_kwargs_for_generation,
)
from m4.models.custom_modules import VLOOMPreTrainedModelBase
from m4.models.perceiver.perceiver import PerceiverResampler
from m4.models.vgpt2.configuration_vgpt2 import VGPT2Config
from m4.training.utils import (
    compute_perceiver_tflops_per_batch_per_gpu,
    compute_tflops_per_batch_per_gpu,
    freeze_model,
)
from m4.utils import logging

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "gpt2"
_CONFIG_FOR_DOC = "VGPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"

GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "gpt2",
    "gpt2-medium",
    "gpt2-large",
    "gpt2-xl",
    "distilgpt2",
    # See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): | |
"""Load tf checkpoints in a pytorch model""" | |
try: | |
import re | |
import tensorflow as tf | |
except ImportError: | |
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
raise | |
tf_path = os.path.abspath(gpt2_checkpoint_path) | |
logger.info(f"Converting TensorFlow checkpoint from {tf_path}") | |
# Load weights from TF model | |
init_vars = tf.train.list_variables(tf_path) | |
names = [] | |
arrays = [] | |
for name, shape in init_vars: | |
logger.info(f"Loading TF weight {name} with shape {shape}") | |
array = tf.train.load_variable(tf_path, name) | |
names.append(name) | |
arrays.append(array.squeeze()) | |
for name, array in zip(names, arrays): | |
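        # TF variable names look like "model/h0/attn/c_attn/w"; walk each path segment down the PyTorch module tree.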
name = name[6:] # skip "model/" | |
name = name.split("/") | |
pointer = model | |
for m_name in name: | |
if re.fullmatch(r"[A-Za-z]+\d+", m_name): | |
scope_names = re.split(r"(\d+)", m_name) | |
else: | |
scope_names = [m_name] | |
if scope_names[0] == "w" or scope_names[0] == "g": | |
pointer = getattr(pointer, "weight") | |
elif scope_names[0] == "b": | |
pointer = getattr(pointer, "bias") | |
elif scope_names[0] == "wpe" or scope_names[0] == "wte": | |
pointer = getattr(pointer, scope_names[0]) | |
pointer = getattr(pointer, "weight") | |
else: | |
pointer = getattr(pointer, scope_names[0]) | |
if len(scope_names) >= 2: | |
num = int(scope_names[1]) | |
pointer = pointer[num] | |
try: | |
assert ( | |
pointer.shape == array.shape | |
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" | |
except AssertionError as e: | |
e.args += (pointer.shape, array.shape) | |
raise | |
logger.info(f"Initialize PyTorch weight {name}") | |
pointer.data = torch.from_numpy(array) | |
return model | |
class GPT2Attention(nn.Module): | |
def __init__(self, config, is_cross_attention=False, layer_idx=None): | |
super().__init__() | |
max_positions = config.max_position_embeddings | |
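        # Causal mask buffer: a lower-triangular (1, 1, max_positions, max_positions) matrix of 1s/0s,
        # where 1 means "this key position may be attended to from that query position".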
self.register_buffer( | |
"bias", | |
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view( | |
1, 1, max_positions, max_positions | |
), | |
) | |
self.register_buffer("masked_bias", torch.tensor(-1e4)) | |
self.embed_dim = config.hidden_size | |
self.num_heads = config.num_attention_heads | |
self.head_dim = self.embed_dim // self.num_heads | |
self.split_size = self.embed_dim | |
if self.head_dim * self.num_heads != self.embed_dim: | |
raise ValueError( | |
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" | |
f" {self.num_heads})." | |
) | |
self.scale_attn_weights = config.scale_attn_weights | |
self.is_cross_attention = is_cross_attention | |
# Layer-wise attention scaling, reordering, and upcasting | |
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx | |
self.layer_idx = layer_idx | |
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn | |
if self.is_cross_attention: | |
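            # In cross-attention, keys/values are projected from the encoder (e.g. vision) hidden states, whose
            # width may differ from the text `embed_dim`; queries still come from the decoder hidden states.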
in_dim = self.embed_dim if not hasattr(config, "vision_embed_dim") else config.vision_embed_dim | |
self.c_attn = Conv1D(2 * self.embed_dim, in_dim) | |
self.q_attn = Conv1D(self.embed_dim, self.embed_dim) | |
else: | |
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) | |
self.c_proj = Conv1D(self.embed_dim, self.embed_dim) | |
self.attn_dropout = nn.Dropout(config.attn_pdrop) | |
self.resid_dropout = nn.Dropout(config.resid_pdrop) | |
self.pruned_heads = set() | |
def prune_heads(self, heads): | |
if len(heads) == 0: | |
return | |
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) | |
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) | |
# Prune conv1d layers | |
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) | |
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) | |
# Update hyper params | |
self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) | |
self.num_heads = self.num_heads - len(heads) | |
self.pruned_heads = self.pruned_heads.union(heads) | |
def _attn(self, query, key, value, attention_mask=None, head_mask=None): | |
attn_weights = torch.matmul(query, key.transpose(-1, -2)) | |
if self.scale_attn_weights: | |
attn_weights = attn_weights / torch.tensor( | |
value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device | |
) | |
# Layer-wise attention scaling | |
if self.scale_attn_by_inverse_layer_idx: | |
attn_weights = attn_weights / float(self.layer_idx + 1) | |
if not self.is_cross_attention: | |
            # Only the "normal" self-attention layer applies the causal mask; cross-attention does not.
query_length, key_length = query.size(-2), key.size(-2) | |
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool) | |
mask_value = torch.finfo(attn_weights.dtype).min | |
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. | |
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` | |
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) | |
attn_weights = torch.where(causal_mask, attn_weights, mask_value) | |
if attention_mask is not None: | |
# Apply the attention mask | |
attn_weights = attn_weights + attention_mask | |
attn_weights = nn.functional.softmax(attn_weights, dim=-1) | |
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise | |
attn_weights = attn_weights.type(value.dtype) | |
attn_weights = self.attn_dropout(attn_weights) | |
# Mask heads if we want to | |
if head_mask is not None: | |
attn_weights = attn_weights * head_mask | |
attn_output = torch.matmul(attn_weights, value) | |
return attn_output, attn_weights | |
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): | |
# Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) | |
bsz, num_heads, q_seq_len, dk = query.size() | |
_, _, k_seq_len, _ = key.size() | |
# Preallocate attn_weights for `baddbmm` | |
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) | |
# Compute Scale Factor | |
scale_factor = 1.0 | |
if self.scale_attn_weights: | |
scale_factor /= float(value.size(-1)) ** 0.5 | |
if self.scale_attn_by_inverse_layer_idx: | |
scale_factor /= float(self.layer_idx + 1) | |
# Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) | |
with autocast(enabled=False): | |
q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) | |
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) | |
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) | |
if not self.is_cross_attention: | |
            # Only the "normal" self-attention layer applies the causal mask; cross-attention does not.
query_length, key_length = query.size(-2), key.size(-2) | |
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool() | |
mask_value = torch.finfo(attn_weights.dtype).min | |
# Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. | |
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` | |
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) | |
attn_weights = torch.where(causal_mask, attn_weights, mask_value) | |
if attention_mask is not None: | |
# Apply the attention mask | |
attn_weights = attn_weights + attention_mask | |
attn_weights = nn.functional.softmax(attn_weights, dim=-1) | |
        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-op otherwise
if attn_weights.dtype != torch.float32: | |
raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32") | |
attn_weights = attn_weights.type(value.dtype) | |
attn_weights = self.attn_dropout(attn_weights) | |
# Mask heads if we want to | |
if head_mask is not None: | |
attn_weights = attn_weights * head_mask | |
attn_output = torch.matmul(attn_weights, value) | |
return attn_output, attn_weights | |
def _split_heads(self, tensor, num_heads, attn_head_size): | |
""" | |
Splits hidden_size dim into attn_head_size and num_heads | |
""" | |
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) | |
tensor = tensor.view(new_shape) | |
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) | |
def _merge_heads(self, tensor, num_heads, attn_head_size): | |
""" | |
Merges attn_head_size dim and num_attn_heads dim into hidden_size | |
""" | |
tensor = tensor.permute(0, 2, 1, 3).contiguous() | |
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) | |
return tensor.view(new_shape) | |
def forward( | |
self, | |
hidden_states: Optional[Tuple[torch.FloatTensor]], | |
layer_past: Optional[Tuple[torch.Tensor]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
encoder_hidden_states: Optional[torch.Tensor] = None, | |
encoder_attention_mask: Optional[torch.FloatTensor] = None, | |
use_cache: Optional[bool] = False, | |
output_attentions: Optional[bool] = False, | |
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: | |
if encoder_hidden_states is not None: | |
if not hasattr(self, "q_attn"): | |
raise ValueError( | |
"If class is used as cross attention, the weights `q_attn` have to be defined. " | |
"Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." | |
) | |
query = self.q_attn(hidden_states) | |
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) | |
attention_mask = encoder_attention_mask | |
else: | |
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) | |
query = self._split_heads(query, self.num_heads, self.head_dim) | |
key = self._split_heads(key, self.num_heads, self.head_dim) | |
value = self._split_heads(value, self.num_heads, self.head_dim) | |
if layer_past is not None: | |
past_key, past_value = layer_past | |
key = torch.cat((past_key, key), dim=-2) | |
value = torch.cat((past_value, value), dim=-2) | |
if use_cache is True: | |
present = (key, value) | |
else: | |
present = None | |
if self.reorder_and_upcast_attn: | |
attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) | |
else: | |
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) | |
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) | |
attn_output = self.c_proj(attn_output) | |
attn_output = self.resid_dropout(attn_output) | |
outputs = (attn_output, present) | |
if output_attentions: | |
outputs += (attn_weights,) | |
return outputs # a, present, (attentions) | |
class GPT2MLP(nn.Module): | |
def __init__(self, intermediate_size, config): | |
super().__init__() | |
embed_dim = config.hidden_size | |
self.c_fc = Conv1D(intermediate_size, embed_dim) | |
self.c_proj = Conv1D(embed_dim, intermediate_size) | |
self.act = ACT2FN[config.activation_function] | |
self.dropout = nn.Dropout(config.resid_pdrop) | |
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: | |
hidden_states = self.c_fc(hidden_states) | |
hidden_states = self.act(hidden_states) | |
hidden_states = self.c_proj(hidden_states) | |
hidden_states = self.dropout(hidden_states) | |
return hidden_states | |
class GPT2Block(nn.Module): | |
def __init__(self, config, layer_idx=None): | |
super().__init__() | |
hidden_size = config.hidden_size | |
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size | |
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) | |
self.attn = GPT2Attention(config, layer_idx=layer_idx) | |
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) | |
if config.add_cross_attention: | |
self.crossattention = GPT2Attention(config, is_cross_attention=True, layer_idx=layer_idx) | |
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) | |
self.mlp = GPT2MLP(inner_dim, config) | |
def forward( | |
self, | |
hidden_states: Optional[Tuple[torch.FloatTensor]], | |
layer_past: Optional[Tuple[torch.Tensor]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
encoder_hidden_states: Optional[torch.Tensor] = None, | |
encoder_attention_mask: Optional[torch.FloatTensor] = None, | |
use_cache: Optional[bool] = False, | |
output_attentions: Optional[bool] = False, | |
) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: | |
residual = hidden_states | |
hidden_states = self.ln_1(hidden_states) | |
attn_outputs = self.attn( | |
hidden_states, | |
layer_past=layer_past, | |
attention_mask=attention_mask, | |
head_mask=head_mask, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
) | |
attn_output = attn_outputs[0] # output_attn: a, present, (attentions) | |
outputs = attn_outputs[1:] | |
# residual connection | |
hidden_states = attn_output + residual | |
if encoder_hidden_states is not None: | |
# add one self-attention block for cross-attention | |
if not hasattr(self, "crossattention"): | |
raise ValueError( | |
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " | |
"cross-attention layers by setting `config.add_cross_attention=True`" | |
) | |
residual = hidden_states | |
hidden_states = self.ln_cross_attn(hidden_states) | |
cross_attn_outputs = self.crossattention( | |
hidden_states, | |
attention_mask=attention_mask, | |
head_mask=head_mask, | |
encoder_hidden_states=encoder_hidden_states, | |
encoder_attention_mask=encoder_attention_mask, | |
output_attentions=output_attentions, | |
) | |
attn_output = cross_attn_outputs[0] | |
# residual connection | |
hidden_states = residual + attn_output | |
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights | |
residual = hidden_states | |
hidden_states = self.ln_2(hidden_states) | |
feed_forward_hidden_states = self.mlp(hidden_states) | |
# residual connection | |
hidden_states = residual + feed_forward_hidden_states | |
if use_cache: | |
outputs = (hidden_states,) + outputs | |
else: | |
outputs = (hidden_states,) + outputs[1:] | |
return outputs # hidden_states, present, (attentions, cross_attentions) | |
class VGPT2GatedCrossAttentionBlock(nn.Module): | |
def __init__(self, config, layer_idx=None): | |
super().__init__() | |
hidden_size = config.hidden_size | |
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size | |
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) | |
self.cross_attn = GPT2Attention(config, is_cross_attention=True, layer_idx=layer_idx) | |
self.mlp = GPT2MLP(inner_dim, config) | |
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) | |
self.act = nn.Tanh() | |
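        # Flamingo-style tanh gating: the cross-attention and MLP outputs are scaled by tanh(alpha) before being
        # added to the residual stream, so zero-initialized alphas make this block a no-op at the start of training.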
if config.alpha_initializer == "zeros": | |
if config.alpha_type == "vector": | |
self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, hidden_size)) | |
self.alpha_dense = nn.Parameter(torch.zeros(1, 1, hidden_size)) | |
elif config.alpha_type == "float": | |
self.alpha_cross_attn = nn.Parameter(torch.zeros(1)) | |
self.alpha_dense = nn.Parameter(torch.zeros(1)) | |
else: | |
raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") | |
elif config.alpha_initializer == "ones": | |
if config.alpha_type == "vector": | |
self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, hidden_size)) | |
self.alpha_dense = nn.Parameter(torch.ones(1, 1, hidden_size)) | |
elif config.alpha_type == "float": | |
self.alpha_cross_attn = nn.Parameter(torch.ones(1)) | |
self.alpha_dense = nn.Parameter(torch.ones(1)) | |
else: | |
raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") | |
elif config.alpha_initializer in {"normal", "gaussian", "random"}: | |
if config.alpha_type == "vector": | |
self.alpha_cross_attn = nn.Parameter( | |
torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, hidden_size)) | |
) | |
self.alpha_dense = nn.Parameter( | |
torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, hidden_size)) | |
) | |
            elif config.alpha_type == "float":
                self.alpha_cross_attn = nn.Parameter(
                    torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))
                )
                self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,)))
else: | |
raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})") | |
else: | |
raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!") | |
def forward( | |
self, | |
hidden_states: Optional[Tuple[torch.FloatTensor]], | |
layer_past: Optional[Tuple[torch.Tensor]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
image_hidden_states: Optional[torch.Tensor] = None, | |
image_attention_mask: Optional[torch.FloatTensor] = None, | |
use_cache: Optional[bool] = False, | |
output_attentions: Optional[bool] = False, | |
) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: | |
if image_hidden_states is None: | |
            raise ValueError(
                "`image_hidden_states` (the visual features to condition on) are required by the VGPT2 gated"
                " cross-attention module."
            )
# add one self-attention block for cross-attention | |
# TODO(aps): Handle cross attention in the outputs | |
# if not hasattr(self, "crossattention"): | |
# raise ValueError( | |
# f"If `image_hidden_states` are passed, {self} has to be instantiated with " | |
# "cross-attention layers by setting `config.add_cross_attention=True`" | |
# ) | |
residual = hidden_states | |
hidden_states = self.ln_1(hidden_states) | |
cross_attn_outputs = self.cross_attn( | |
hidden_states, | |
attention_mask=attention_mask, | |
head_mask=head_mask, | |
encoder_hidden_states=image_hidden_states, | |
encoder_attention_mask=image_attention_mask, | |
output_attentions=output_attentions, | |
) | |
attn_output = cross_attn_outputs[0] | |
outputs = cross_attn_outputs[1:] | |
# residual connection | |
hidden_states = residual + self.act(self.alpha_cross_attn) * attn_output | |
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights | |
residual = hidden_states | |
hidden_states = self.ln_2(hidden_states) | |
feed_forward_hidden_states = self.mlp(hidden_states) | |
# residual connection | |
hidden_states = residual + self.act(self.alpha_dense) * feed_forward_hidden_states | |
if use_cache: | |
outputs = (hidden_states,) + outputs | |
else: | |
outputs = (hidden_states,) + outputs[1:] | |
return outputs # hidden_states, present, (attentions, cross_attentions) | |
class VGPT2PreTrainedModel(VLOOMPreTrainedModelBase): | |
""" | |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained | |
models. | |
""" | |
config_class = VGPT2Config | |
load_tf_weights = load_tf_weights_in_gpt2 | |
base_model_prefix = "transformer" | |
is_parallelizable = True | |
supports_gradient_checkpointing = True | |
_no_split_modules = ["GPT2Block"] | |
def __init__(self, *inputs, **kwargs): | |
super().__init__(*inputs, **kwargs) | |
def _init_weights(self, module): | |
"""Initialize the weights.""" | |
if isinstance(module, (nn.Linear, Conv1D)): | |
# Slightly different from the TF version which uses truncated_normal for initialization | |
# cf https://github.com/pytorch/pytorch/pull/5617 | |
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) | |
if module.bias is not None: | |
module.bias.data.zero_() | |
elif isinstance(module, nn.Embedding): | |
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) | |
if module.padding_idx is not None: | |
module.weight.data[module.padding_idx].zero_() | |
elif isinstance(module, nn.LayerNorm): | |
module.bias.data.zero_() | |
module.weight.data.fill_(1.0) | |
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: | |
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale | |
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. | |
# > -- GPT-2 :: https://openai.com/blog/better-language-models/ | |
# | |
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py | |
for name, p in module.named_parameters(): | |
if name == "c_proj.weight": | |
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block | |
p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))) | |
def _set_gradient_checkpointing(self, module, value=False): | |
if isinstance(module, VGPT2Model): | |
module.gradient_checkpointing = value | |
def override_vision_model_wrapper(cls, model, config, vision_model_name, vision_model_params, torch_dtype): | |
# this can be called via from_pretrained from a class w/ head or w/o head so we extract the beheaded model version | |
beheaded_model = model.transformer if hasattr(model, "transformer") else model | |
cls.override_vision_model(beheaded_model, vision_model_name, vision_model_params, torch_dtype) | |
beheaded_model.freeze_relevant_params(config) | |
GPT2_START_DOCSTRING = r""" | |
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the | |
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads | |
etc.) | |
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. | |
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage | |
and behavior. | |
Parameters: | |
config ([`VGPT2Config`]): Model configuration class with all the parameters of the model. | |
Initializing with a config file does not load the weights associated with the model, only the | |
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. | |
""" | |
GPT2_INPUTS_DOCSTRING = r""" | |
Args: | |
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): | |
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else | |
`past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input | |
sequence tokens in the vocabulary. | |
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as | |
`input_ids`. | |
Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and | |
[`PreTrainedTokenizer.__call__`] for details. | |
[What are input IDs?](../glossary#input-ids) | |
past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): | |
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see | |
`past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have | |
their past given to this model should not be passed as `input_ids` as they have already been computed. | |
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: | |
- 1 for tokens that are **not masked**, | |
- 0 for tokens that are **masked**. | |
If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for | |
`past_key_values`. In other words, the `attention_mask` always has to have the length: | |
`len(past_key_values) + len(input_ids)` | |
[What are attention masks?](../glossary#attention-mask) | |
token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): | |
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, | |
1]`: | |
- 0 corresponds to a *sentence A* token, | |
- 1 corresponds to a *sentence B* token. | |
[What are token type IDs?](../glossary#token-type-ids) | |
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, | |
config.max_position_embeddings - 1]`. | |
[What are position IDs?](../glossary#position-ids) | |
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): | |
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: | |
- 1 indicates the head is **not masked**, | |
- 0 indicates the head is **masked**. | |
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): | |
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This | |
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the | |
model's internal embedding lookup matrix. | |
If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see | |
`past_key_values`). | |
use_cache (`bool`, *optional*): | |
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see | |
`past_key_values`). | |
output_attentions (`bool`, *optional*): | |
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned | |
tensors for more detail. | |
output_hidden_states (`bool`, *optional*): | |
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for | |
more detail. | |
return_dict (`bool`, *optional*): | |
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. | |
""" | |
PARALLELIZE_DOCSTRING = r""" | |
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given, | |
it will evenly distribute blocks across all devices. | |
Args: | |
device_map (`Dict[int, list]`, optional, defaults to None): | |
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always | |
automatically mapped to the first device (for esoteric reasons). That means that the first device should | |
have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the | |
following number of attention modules: | |
- gpt2: 12 | |
- gpt2-medium: 24 | |
- gpt2-large: 36 | |
- gpt2-xl: 48 | |
Example: | |
```python | |
# Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: | |
model = GPT2LMHeadModel.from_pretrained("gpt2-xl") | |
device_map = { | |
0: [0, 1, 2, 3, 4, 5, 6, 7, 8], | |
1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], | |
2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], | |
3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], | |
} | |
model.parallelize(device_map) | |
``` | |
""" | |
DEPARALLELIZE_DOCSTRING = r""" | |
Moves the model to cpu from a model parallel state. | |
Example: | |
```python | |
# On a 4 GPU machine with gpt2-large: | |
model = GPT2LMHeadModel.from_pretrained("gpt2-large") | |
device_map = { | |
0: [0, 1, 2, 3, 4, 5, 6, 7], | |
1: [8, 9, 10, 11, 12, 13, 14, 15], | |
2: [16, 17, 18, 19, 20, 21, 22, 23], | |
3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], | |
} | |
model.parallelize(device_map) # Splits the model across several devices | |
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() | |
``` | |
""" | |
class VGPT2Model(VGPT2PreTrainedModel): | |
_keys_to_ignore_on_load_missing = ["attn.masked_bias"] | |
def __init__(self, config, vision_model=None): | |
super().__init__(config) | |
self.embed_dim = config.hidden_size | |
self.config = config | |
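        # DecoupledEmbedding keeps the `additional_vocab_size` new tokens (e.g. image-related special tokens) in a
        # separate, always-trainable table so the pretrained GPT-2 vocabulary can stay frozen.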
self.wte = DecoupledEmbedding( | |
num_embeddings=config.vocab_size, | |
num_additional_embeddings=config.additional_vocab_size, | |
embedding_dim=self.embed_dim, | |
partially_freeze=config.freeze_text_layers, | |
) | |
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) | |
self.drop = nn.Dropout(config.embd_pdrop) | |
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) | |
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) | |
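        # A gated cross-attention block is interleaved before every `cross_layer_interval`-th GPT-2 block
        # (see `vblock` in `forward`), giving num_hidden_layers // cross_layer_interval such blocks.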
self.cross_layer_interval = config.cross_layer_interval | |
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval | |
self.gated_cross_attn_layers = nn.ModuleList( | |
[VGPT2GatedCrossAttentionBlock(config, layer_idx=i) for i in range(num_cross_layers)] | |
) | |
# Perceiver Resampler | |
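        # (pools the variable-length sequence of image hidden states into `resampler_n_latents` learned latent tokens)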
if config.use_resampler: | |
self.perceiver_resampler = PerceiverResampler( | |
self.config, | |
self.config.vision_embed_dim, | |
config.resampler_depth, | |
config.resampler_n_heads, | |
config.resampler_head_dim, | |
config.resampler_n_latents, | |
) | |
# Model parallel | |
self.model_parallel = False | |
self.device_map = None | |
self.gradient_checkpointing = False | |
        # The image token is the first token of the additional vocabulary, so its index equals `vocab_size`
        # (base vocabulary indices run from 0 to vocab_size - 1).
        self.image_token_idx = config.image_token_index
        # The vision model is passed in uninitialized here; `from_pretrained` loads its pre-trained weights later.
        # This avoids losing the vision weights when `from_pretrained` is called on the main model.
        self.vision_model = vision_model
# Initialize weights and apply final processing | |
self.post_init() | |
self.freeze_relevant_params(config) | |
def freeze_relevant_params(self, config=None): | |
if config is None: | |
config = self.config | |
if config.freeze_text_layers: | |
self.freeze_text_layers() | |
if config.freeze_vision_layers: | |
freeze_model(self.vision_model) | |
def freeze_text_layers(self): | |
for module in [self.wpe, self.h, self.ln_f]: | |
freeze_model(module) | |
# TODO(aps): Implement later for VGPT2 | |
def parallelize(self, device_map=None): | |
# Check validity of device_map | |
self.device_map = ( | |
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map | |
) | |
assert_device_map(self.device_map, len(self.h)) | |
self.model_parallel = True | |
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) | |
self.last_device = "cuda:" + str(max(self.device_map.keys())) | |
self.wte = self.wte.to(self.first_device) | |
self.wpe = self.wpe.to(self.first_device) | |
# Load onto devices | |
for k, v in self.device_map.items(): | |
for block in v: | |
cuda_device = "cuda:" + str(k) | |
self.h[block] = self.h[block].to(cuda_device) | |
# ln_f to last | |
self.ln_f = self.ln_f.to(self.last_device) | |
# TODO(aps): Implement later for VGPT2 | |
def deparallelize(self): | |
self.model_parallel = False | |
self.device_map = None | |
self.first_device = "cpu" | |
self.last_device = "cpu" | |
self.wte = self.wte.to("cpu") | |
self.wpe = self.wpe.to("cpu") | |
for index in range(len(self.h)): | |
self.h[index] = self.h[index].to("cpu") | |
self.ln_f = self.ln_f.to("cpu") | |
torch.cuda.empty_cache() | |
def get_input_embeddings(self): | |
return self.wte | |
def set_input_embeddings(self, new_embeddings): | |
self.wte = new_embeddings | |
def _prune_heads(self, heads_to_prune): | |
""" | |
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} | |
""" | |
for layer, heads in heads_to_prune.items(): | |
self.h[layer].attn.prune_heads(heads) | |
def forward( | |
self, | |
input_ids: Optional[torch.LongTensor] = None, | |
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
token_type_ids: Optional[torch.LongTensor] = None, | |
position_ids: Optional[torch.LongTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
inputs_embeds: Optional[torch.FloatTensor] = None, | |
pixel_values: Optional[torch.FloatTensor] = None, | |
image_embeddings: Optional[torch.FloatTensor] = None, | |
image_attention_mask: Optional[torch.Tensor] = None, | |
crossblock_head_mask: Optional[torch.Tensor] = None, | |
use_cache: Optional[bool] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: | |
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions | |
output_hidden_states = ( | |
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states | |
) | |
use_cache = use_cache if use_cache is not None else self.config.use_cache | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
if input_ids is not None and inputs_embeds is not None: | |
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") | |
elif input_ids is not None: | |
input_shape = input_ids.size() | |
input_ids = input_ids.view(-1, input_shape[-1]) | |
batch_size = input_ids.shape[0] | |
elif inputs_embeds is not None: | |
input_shape = inputs_embeds.size()[:-1] | |
batch_size = inputs_embeds.shape[0] | |
else: | |
raise ValueError("You have to specify either input_ids or inputs_embeds") | |
device = input_ids.device if input_ids is not None else inputs_embeds.device | |
if token_type_ids is not None: | |
token_type_ids = token_type_ids.view(-1, input_shape[-1]) | |
if position_ids is not None: | |
position_ids = position_ids.view(-1, input_shape[-1]) | |
if past_key_values is None: | |
past_length = 0 | |
past_key_values = tuple([None] * len(self.h)) | |
else: | |
past_length = past_key_values[0][0].size(-2) | |
if position_ids is None: | |
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) | |
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) | |
# GPT2Attention mask. | |
if attention_mask is not None: | |
if batch_size <= 0: | |
raise ValueError("batch_size has to be defined and > 0") | |
attention_mask = attention_mask.view(batch_size, -1) | |
# We create a 3D attention mask from a 2D tensor mask. | |
# Sizes are [batch_size, 1, 1, to_seq_length] | |
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] | |
# this attention mask is more simple than the triangular masking of causal attention | |
# used in OpenAI GPT, we just need to prepare the broadcast dimension here. | |
attention_mask = attention_mask[:, None, None, :] | |
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for | |
# masked positions, this operation will create a tensor which is 0.0 for | |
# positions we want to attend and the dtype's smallest value for masked positions. | |
# Since we are adding it to the raw scores before the softmax, this is | |
# effectively the same as removing these entirely. | |
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility | |
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min | |
# If a 2D or 3D attention mask is provided for the cross-attention | |
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] | |
if pixel_values is not None and image_embeddings is not None: | |
raise ValueError("You cannot specify both pixel_values and image_embeddings at the same time") | |
elif pixel_values is not None: | |
pixel_values = pixel_values.to(dtype=self.dtype, device=input_ids.device) # fp16 compatibility | |
batch_size, num_images = pixel_values.size(0), pixel_values.size(1) | |
pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) | |
# Get sequence from the vision encoder | |
image_hidden_states = self.vision_model(pixel_values=pixel_values).last_hidden_state | |
elif image_embeddings is not None: | |
batch_size, num_images, image_seq_len, image_hidden_size = image_embeddings.size() | |
image_hidden_states = image_embeddings.to(dtype=self.dtype, device=input_ids.device) | |
image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) | |
if self.config.use_resampler: | |
image_hidden_states = self.perceiver_resampler(image_hidden_states) | |
image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2) | |
image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size) | |
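        # image_hidden_states is now (batch_size, num_images * image_seq_len, image_hidden_size): all image tokens
        # of a sample are concatenated along one sequence axis for cross-attention.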
# Make image_attention_mask compatible with hidden states | |
text_seq_len = image_attention_mask.size(1) | |
image_attention_mask = image_attention_mask.unsqueeze(-1) | |
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len) | |
image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len) | |
if image_hidden_states is not None: | |
image_batch_size, image_sequence_length, _ = image_hidden_states.size() | |
image_hidden_shape = (image_batch_size, image_sequence_length) | |
if image_attention_mask is None: | |
image_attention_mask = torch.ones(image_hidden_shape, device=device) | |
image_attention_mask = self.invert_attention_mask(image_attention_mask) | |
else: | |
image_attention_mask = None | |
# Prepare head mask if needed | |
# 1.0 in head_mask indicate we keep the head | |
# attention_probs has shape bsz x n_heads x N x N | |
# head_mask has shape n_layer x batch x n_heads x N x N | |
head_mask = self.get_head_mask(head_mask, self.config.n_layer) | |
if inputs_embeds is None: | |
inputs_embeds = self.wte(input_ids) | |
position_embeds = self.wpe(position_ids) | |
hidden_states = inputs_embeds + position_embeds | |
if token_type_ids is not None: | |
token_type_embeds = self.wte(token_type_ids) | |
hidden_states = hidden_states + token_type_embeds | |
hidden_states = self.drop(hidden_states) | |
output_shape = input_shape + (hidden_states.size(-1),) | |
presents = () if use_cache else None | |
all_self_attentions = () if output_attentions else None | |
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None | |
all_hidden_states = () if output_hidden_states else None | |
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): | |
# Model parallel | |
if self.model_parallel: | |
torch.cuda.set_device(hidden_states.device) | |
# Ensure layer_past is on same device as hidden_states (might not be correct) | |
if layer_past is not None: | |
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) | |
# Ensure that attention_mask is always on the same device as hidden_states | |
if attention_mask is not None: | |
attention_mask = attention_mask.to(hidden_states.device) | |
if isinstance(head_mask, torch.Tensor): | |
head_mask = head_mask.to(hidden_states.device) | |
if output_hidden_states: | |
all_hidden_states = all_hidden_states + (hidden_states,) | |
def vblock( | |
main_block, | |
hidden_states, | |
layer_past, | |
attention_mask, | |
layer_head_mask, | |
use_cache, | |
output_attentions, | |
image_hidden_states, | |
image_attention_mask, | |
layer_idx, | |
cross_layer_interval, | |
gated_cross_attn_layers, | |
): | |
# TODO(aps): Add cross attention values to respective lists | |
# TODO(aps): Add xblock head mask support | |
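            # Every `cross_layer_interval` layers, first run the gated cross-attention block over the image tokens,
            # then the regular GPT-2 block; wrapping both in `vblock` lets gradient checkpointing cover the pair.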
if layer_idx % cross_layer_interval == 0: | |
xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] | |
outputs = xblock( | |
hidden_states, | |
attention_mask=attention_mask, | |
image_hidden_states=image_hidden_states, | |
image_attention_mask=image_attention_mask, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
) | |
hidden_states = outputs[0] | |
outputs = main_block( | |
hidden_states, | |
layer_past=layer_past, | |
attention_mask=attention_mask, | |
head_mask=layer_head_mask, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
) | |
return outputs | |
if self.gradient_checkpointing and self.training: | |
layer_past = None | |
if use_cache: | |
logger.warning_once( | |
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." | |
) | |
use_cache = False | |
outputs = torch.utils.checkpoint.checkpoint( | |
vblock, | |
block, | |
hidden_states, | |
layer_past, | |
attention_mask, | |
head_mask[i], | |
use_cache, | |
output_attentions, | |
image_hidden_states, | |
image_attention_mask, | |
i, | |
self.cross_layer_interval, | |
self.gated_cross_attn_layers, | |
) | |
else: | |
outputs = vblock( | |
block, | |
hidden_states, | |
layer_past=layer_past, | |
attention_mask=attention_mask, | |
layer_head_mask=head_mask[i], | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
image_hidden_states=image_hidden_states, | |
image_attention_mask=image_attention_mask, | |
layer_idx=i, | |
cross_layer_interval=self.cross_layer_interval, | |
gated_cross_attn_layers=self.gated_cross_attn_layers, | |
) | |
hidden_states = outputs[0] | |
if use_cache is True: | |
presents = presents + (outputs[1],) | |
if output_attentions: | |
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) | |
if self.config.add_cross_attention: | |
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) | |
# Model Parallel: If it's the last layer for that device, put things on the next device | |
if self.model_parallel: | |
for k, v in self.device_map.items(): | |
if i == v[-1] and "cuda:" + str(k) != self.last_device: | |
hidden_states = hidden_states.to("cuda:" + str(k + 1)) | |
hidden_states = self.ln_f(hidden_states) | |
hidden_states = hidden_states.view(output_shape) | |
# Add last hidden state | |
if output_hidden_states: | |
all_hidden_states = all_hidden_states + (hidden_states,) | |
if not return_dict: | |
return tuple( | |
v | |
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] | |
if v is not None | |
) | |
return BaseModelOutputWithPastAndCrossAttentions( | |
last_hidden_state=hidden_states, | |
past_key_values=presents, | |
hidden_states=all_hidden_states, | |
attentions=all_self_attentions, | |
cross_attentions=all_cross_attentions, | |
) | |
class VGPT2LMHeadModel(VGPT2PreTrainedModel): | |
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"] | |
def __init__(self, config, vision_model=None): | |
super().__init__(config) | |
self.transformer = VGPT2Model(config, vision_model=vision_model) | |
self.lm_head = DecoupledLinear( | |
in_features=config.n_embd, | |
out_features=config.vocab_size, | |
out_additional_features=config.additional_vocab_size, | |
bias=False, | |
partially_freeze=config.freeze_lm_head, | |
) | |
# Model parallel | |
self.model_parallel = False | |
self.device_map = None | |
# Initialize weights and apply final processing | |
self.post_init() | |
def parallelize(self, device_map=None): | |
self.device_map = ( | |
get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) | |
if device_map is None | |
else device_map | |
) | |
assert_device_map(self.device_map, len(self.transformer.h)) | |
self.transformer.parallelize(self.device_map) | |
self.lm_head = self.lm_head.to(self.transformer.first_device) | |
self.model_parallel = True | |
def deparallelize(self): | |
self.transformer.deparallelize() | |
self.transformer = self.transformer.to("cpu") | |
self.lm_head = self.lm_head.to("cpu") | |
self.model_parallel = False | |
torch.cuda.empty_cache() | |
def get_output_embeddings(self): | |
return self.lm_head | |
def set_output_embeddings(self, new_embeddings): | |
self.lm_head = new_embeddings | |
def tie_weights(self): | |
""" | |
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding. | |
""" | |
output_embeddings = self.get_output_embeddings() | |
input_embeddings = self.get_input_embeddings() | |
if getattr(self.config, "tie_word_embeddings", True): | |
output_embeddings.weight = input_embeddings.weight | |
if input_embeddings.num_additional_embeddings > 0: | |
assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings | |
output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight | |
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): | |
output_embeddings.out_features = input_embeddings.num_embeddings | |
if hasattr(output_embeddings, "out_additional_features") and hasattr( | |
input_embeddings, "num_additional_embeddings" | |
): | |
output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings | |
    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        return prepare_inputs_for_generation(input_ids, past=past, **kwargs)

    @staticmethod
    def _expand_inputs_for_generation(
        *args,
        **model_kwargs,
    ):
        return expand_inputs_for_generation(*args, **model_kwargs)

    @staticmethod
    def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False):
        return update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder)
def forward( | |
self, | |
input_ids: Optional[torch.LongTensor] = None, | |
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
token_type_ids: Optional[torch.LongTensor] = None, | |
position_ids: Optional[torch.LongTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
inputs_embeds: Optional[torch.FloatTensor] = None, | |
pixel_values: Optional[torch.FloatTensor] = None, | |
image_embeddings: Optional[torch.FloatTensor] = None, | |
image_attention_mask: Optional[torch.Tensor] = None, | |
crossblock_head_mask: Optional[torch.Tensor] = None, | |
labels: Optional[torch.LongTensor] = None, | |
use_cache: Optional[bool] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: | |
r""" | |
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | |
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set | |
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` | |
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` | |
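
        Example (illustrative sketch only; the checkpoint path and image tensors below are placeholders, not assets
        shipped with this code):

        ```python
        >>> import torch
        >>> from transformers import GPT2Tokenizer

        >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        >>> model = VGPT2LMHeadModel.from_pretrained("path/to/a/vgpt2/checkpoint")  # hypothetical local checkpoint

        >>> inputs = tokenizer("A picture of", return_tensors="pt")
        >>> pixel_values = torch.rand(1, 1, 3, 224, 224)  # (batch_size, num_images, channels, height, width)
        >>> image_attention_mask = torch.ones(1, inputs["input_ids"].shape[1], 1, dtype=torch.long)
        >>> outputs = model(**inputs, pixel_values=pixel_values, image_attention_mask=image_attention_mask, labels=inputs["input_ids"])
        >>> loss, logits = outputs.loss, outputs.logits
        ```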
""" | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
transformer_outputs = self.transformer( | |
input_ids, | |
past_key_values=past_key_values, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
pixel_values=pixel_values, | |
image_embeddings=image_embeddings, | |
image_attention_mask=image_attention_mask, | |
crossblock_head_mask=crossblock_head_mask, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
) | |
hidden_states = transformer_outputs[0] | |
# Set device for model parallelism | |
if self.model_parallel: | |
torch.cuda.set_device(self.transformer.first_device) | |
hidden_states = hidden_states.to(self.lm_head.weight.device) | |
lm_logits = self.lm_head(hidden_states) | |
loss = None | |
if labels is not None: | |
# Shift so that tokens < n predict n | |
if attention_mask is not None: | |
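                # Drop positions whose target token is padding (attention_mask == 0) so they don't contribute to the loss.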
shift_attention_mask = attention_mask[..., 1:] | |
shift_logits = lm_logits[..., :-1, :][shift_attention_mask != 0].contiguous() | |
shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() | |
else: | |
shift_logits = lm_logits[..., :-1, :].contiguous() | |
shift_labels = labels[..., 1:].contiguous() | |
# Flatten the tokens | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) | |
if not return_dict: | |
output = (lm_logits,) + transformer_outputs[1:] | |
return ((loss,) + output) if loss is not None else output | |
return CausalLMOutputWithCrossAttentions( | |
loss=loss, | |
logits=lm_logits, | |
past_key_values=transformer_outputs.past_key_values, | |
hidden_states=transformer_outputs.hidden_states, | |
attentions=transformer_outputs.attentions, | |
cross_attentions=transformer_outputs.cross_attentions, | |
) | |
    @staticmethod
    def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
""" | |
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or | |
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct | |
beam_idx at every generation step. | |
""" | |
return tuple( | |
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) | |
for layer_past in past | |
) | |
def get_model_tflops_per_batch_per_gpu(self, hparams, data_param, tokenizer, max_num_images): | |
config_vl_model = self.config | |
language_embed_size = config_vl_model.n_embd | |
num_language_layers = config_vl_model.n_layer | |
ffn_inner_size = config_vl_model.n_inner | |
vision_config = self.transformer.vision_model.config | |
if hasattr(vision_config, "vision_config"): | |
vision_config = vision_config.vision_config | |
# Get vision model blocks infos | |
vision_patch_size = vision_config.patch_size | |
vision_hidden_size = vision_config.hidden_size | |
num_vision_layers = vision_config.num_hidden_layers | |
# The +1 is for the CLS token | |
single_image_seq_len = (vision_config.image_size // vision_patch_size) ** 2 + 1 | |
vision_exp_factor = vision_config.intermediate_size // vision_hidden_size | |
# Get language and cross-att blocks infos | |
num_cross_attn_layers = num_language_layers // config_vl_model.cross_layer_interval | |
language_seq_len = data_param.max_seq_len | |
language_exp_factor = (ffn_inner_size // language_embed_size) if ffn_inner_size is not None else 4 | |
cross_att_exp_factor = (ffn_inner_size // language_embed_size) if ffn_inner_size is not None else 4 | |
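        # Keys/values for the gated cross-attention span every image token of a sample: either the resampled
        # Perceiver latents or the full ViT patch sequence, times the maximum number of images.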
k_v_cross_attn_seq_len = ( | |
(self.config.resampler_n_latents * max_num_images) | |
if self.config.use_resampler | |
else (single_image_seq_len * max_num_images) | |
) | |
language_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
num_layers=num_language_layers, | |
batch_size=hparams.batch_size_per_gpu, | |
q_seq_len=language_seq_len, | |
k_seq_len=language_seq_len, | |
hidden_size=language_embed_size, | |
kv_in_dim=language_embed_size, | |
ff_exp_factor=language_exp_factor, | |
grad_acc_size=hparams.grad_acc_size, | |
swiglu=False, | |
vocab_size=tokenizer.vocab_size, | |
count_backward=True, # Always True regardless of freezing, because gradients are computed for cross-attentions | |
use_grad_checkpointing=hparams.gradient_checkpointing, | |
) | |
cross_attention_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
num_layers=num_cross_attn_layers, | |
batch_size=hparams.batch_size_per_gpu, | |
q_seq_len=language_seq_len, | |
k_seq_len=k_v_cross_attn_seq_len, | |
hidden_size=language_embed_size, | |
kv_in_dim=vision_hidden_size, | |
ff_exp_factor=cross_att_exp_factor, | |
grad_acc_size=hparams.grad_acc_size, | |
swiglu=False, | |
vocab_size=None, | |
count_backward=True, | |
use_grad_checkpointing=hparams.gradient_checkpointing, | |
) | |
vision_tflops_per_batch_per_gpu = compute_tflops_per_batch_per_gpu( | |
num_layers=num_vision_layers, | |
batch_size=hparams.batch_size_per_gpu * max_num_images, | |
q_seq_len=single_image_seq_len, | |
k_seq_len=single_image_seq_len, | |
hidden_size=vision_hidden_size, | |
kv_in_dim=vision_hidden_size, | |
ff_exp_factor=vision_exp_factor, | |
grad_acc_size=hparams.grad_acc_size, | |
swiglu=False, | |
vocab_size=None, | |
count_backward=not hparams.model_params["freeze_vision_layers"], | |
use_grad_checkpointing=hparams.gradient_checkpointing, | |
) | |
if self.config.use_resampler: | |
perceiver_tflops_per_batch_per_gpu = compute_perceiver_tflops_per_batch_per_gpu( | |
num_layers=self.config.resampler_depth, | |
batch_size=hparams.batch_size_per_gpu * max_num_images, | |
q_seq_len=self.config.resampler_n_latents, | |
vision_embed_seq_len=single_image_seq_len, | |
q_k_v_input_dim=vision_hidden_size, | |
attention_hidden_size=self.config.resampler_n_heads * self.config.resampler_head_dim, | |
ff_exp_factor=cross_att_exp_factor, | |
count_backward=True, | |
use_grad_checkpointing=hparams.gradient_checkpointing, | |
) | |
flop_count = ( | |
language_tflops_per_batch_per_gpu | |
+ cross_attention_tflops_per_batch_per_gpu | |
+ vision_tflops_per_batch_per_gpu | |
+ perceiver_tflops_per_batch_per_gpu | |
) | |
else: | |
flop_count = ( | |
language_tflops_per_batch_per_gpu | |
+ cross_attention_tflops_per_batch_per_gpu | |
+ vision_tflops_per_batch_per_gpu | |
) | |
return flop_count | |