|
import copy |
|
import math |
|
from typing import Dict, Optional |
|
|
|
import torch |
|
import torch.nn as nn |
|
from torch import Tensor |
|
from torch.utils.checkpoint import checkpoint |
|
from transformers.activations import ACT2FN |
|
from transformers.file_utils import DUMMY_INPUTS, DUMMY_MASK, is_torch_fx_proxy |
|
from transformers.modeling_outputs import ( |
|
BaseModelOutput, |
|
BaseModelOutputWithPastAndCrossAttentions, |
|
Seq2SeqLMOutput, |
|
Seq2SeqModelOutput, |
|
) |
|
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer |
|
from transformers.utils import logging |
|
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map |
|
|
|
from .configuration_fairseq_t5 import FairseqT5Config |
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): |
|
"""Replace non-padding symbols with their position numbers. |
|
Position numbers begin at padding_idx+1. Padding symbols are ignored. |
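
    Example (with padding_idx=1): tensor [[7, 8, 1]] -> positions [[2, 3, 1]]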
|
""" |
|
|
|
|
|
|
|
|
|
mask = tensor.ne(padding_idx).int() |
|
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx |
|
|
|
|
|
class LearnedPositionalEmbedding(nn.Embedding): |
|
""" |
|
This module learns positional embeddings up to a fixed maximum size. |
|
Padding ids are ignored by either offsetting based on padding_idx |
|
or by setting padding_idx to None and ensuring that the appropriate |
|
position ids are passed to the forward function. |
|
""" |
|
|
|
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): |
|
super().__init__(num_embeddings, embedding_dim, padding_idx) |
|
self.onnx_trace = False |
|
if self.padding_idx is not None: |
|
self.max_positions = self.num_embeddings - self.padding_idx - 1 |
|
else: |
|
self.max_positions = self.num_embeddings |
|
|
|
def forward( |
|
self, |
|
input: Tensor, |
|
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, |
|
positions: Optional[Tensor] = None, |
|
offset=0, |
|
): |
|
"""Input is expected to be of size [bsz x seqlen].""" |
|
assert (positions is None) or ( |
|
self.padding_idx is None |
|
), "If positions is pre-computed then padding_idx should not be set." |
|
|
|
if positions is None: |
|
if incremental_state is not None: |
|
|
|
|
|
positions = torch.zeros( |
|
(1, 1), device=input.device, dtype=input.dtype |
|
).fill_(int(self.padding_idx + input.size(1))) |
|
else: |
|
positions = make_positions( |
|
input, self.padding_idx, onnx_trace=self.onnx_trace |
|
) |
|
if offset > 0 and positions.size(1) == 1: |
|
positions = positions + offset |
|
return nn.functional.embedding( |
|
positions, |
|
self.weight, |
|
self.padding_idx, |
|
self.max_norm, |
|
self.norm_type, |
|
self.scale_grad_by_freq, |
|
self.sparse, |
|
) |
|
|
|
|
|
def PositionalEmbedding( |
|
num_embeddings: int, |
|
embedding_dim: int, |
|
padding_idx: int, |
|
): |
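
    # Fairseq convention: position numbers start at padding_idx + 1, so the
    # embedding table is extended by padding_idx + 1 rows and the row at
    # padding_idx is zeroed out below.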
|
|
|
|
|
|
|
|
|
if padding_idx is not None: |
|
num_embeddings = num_embeddings + padding_idx + 1 |
|
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) |
|
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) |
|
if padding_idx is not None: |
|
nn.init.constant_(m.weight[padding_idx], 0) |
|
return m |
|
|
|
|
|
class T5LayerNorm(nn.Module): |
|
def __init__(self, hidden_size, eps=1e-5): |
|
""" |
|
        Construct a layernorm module in the T5 style: no subtraction of the mean. Unlike the original T5 layer norm, this variant also keeps a learned bias.
|
""" |
|
super().__init__() |
|
self.weight = nn.Parameter(torch.ones(hidden_size)) |
|
        self.bias = nn.Parameter(torch.zeros(hidden_size))
|
self.variance_epsilon = eps |
|
|
|
def forward(self, hidden_states): |
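        # RMS-style norm: scale by the root mean square without centering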
|
|
|
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) |
|
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
|
|
|
|
|
if self.weight.dtype in [torch.float16, torch.bfloat16]: |
|
hidden_states = hidden_states.to(self.weight.dtype) |
|
|
|
return self.weight * hidden_states + self.bias |
|
|
|
|
|
def FST5LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    # `export` is accepted for fairseq API compatibility and is ignored here.
    return torch.nn.LayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine)
|
|
|
|
|
class T5DenseReluDense(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
if_bias = True |
|
self.wi = nn.Linear(config.d_model, config.d_ff, bias=if_bias) |
|
self.wo = nn.Linear(config.d_ff, config.d_model, bias=if_bias) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
|
|
|
def forward(self, hidden_states): |
|
hidden_states = self.wi(hidden_states) |
|
hidden_states = nn.functional.relu(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.wo(hidden_states) |
|
return hidden_states |
|
|
|
|
|
class T5DenseGatedGeluDense(nn.Module): |
|
def __init__(self, config): |
|
super().__init__() |
|
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) |
|
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) |
|
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
|
self.gelu_act = ACT2FN["gelu_new"] |
|
|
|
def forward(self, hidden_states): |
|
hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) |
|
hidden_linear = self.wi_1(hidden_states) |
|
hidden_states = hidden_gelu * hidden_linear |
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.wo(hidden_states) |
|
return hidden_states |
|
|
|
|
|
class T5LayerFF(nn.Module): |
|
def __init__(self, config, normalize_before=False): |
|
super().__init__() |
|
if config.feed_forward_proj == "relu": |
|
self.DenseReluDense = T5DenseReluDense(config) |
|
elif config.feed_forward_proj == "gated-gelu": |
|
self.DenseReluDense = T5DenseGatedGeluDense(config) |
|
else: |
|
raise ValueError( |
|
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`" |
|
) |
|
|
|
self.layer_norm = FST5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
|
|
|
self.normalize_before = normalize_before |
|
|
|
def forward(self, hidden_states): |
|
if self.normalize_before: |
|
forwarded_states = self.layer_norm(hidden_states) |
|
else: |
|
forwarded_states = hidden_states |
|
forwarded_states = self.DenseReluDense(forwarded_states) |
|
hidden_states = hidden_states + self.dropout(forwarded_states) |
|
|
|
if not self.normalize_before: |
|
hidden_states = self.layer_norm(hidden_states) |
|
return hidden_states |
|
|
|
|
|
class T5Attention(nn.Module): |
|
def __init__(self, config: FairseqT5Config, has_relative_attention_bias=False): |
|
super().__init__() |
|
self.is_decoder = config.is_decoder |
|
self.has_relative_attention_bias = has_relative_attention_bias |
|
|
|
self.relative_attention_num_buckets = config.relative_attention_num_buckets |
|
self.relative_attention_max_distance = config.relative_attention_max_distance |
|
self.max_positions = config.max_positions |
|
self.d_model = config.d_model |
|
self.key_value_proj_dim = config.d_kv |
|
self.n_heads = config.num_heads |
|
self.dropout = config.dropout_rate |
|
self.inner_dim = self.n_heads * self.key_value_proj_dim |
|
|
|
|
|
if_bias = True |
|
self.q = nn.Linear(self.d_model, self.inner_dim, bias=if_bias) |
|
self.k = nn.Linear(self.d_model, self.inner_dim, bias=if_bias) |
|
self.v = nn.Linear(self.d_model, self.inner_dim, bias=if_bias) |
|
self.o = nn.Linear(self.inner_dim, self.d_model, bias=if_bias) |
|
|
|
if self.has_relative_attention_bias: |
|
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) |
|
self.pruned_heads = set() |
|
self.gradient_checkpointing = getattr(config, "gradient_checkpointing", False) |
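
        # Precompute signed relative positions for every (query, key) pair up to
        # max_positions, bucket them, and shift so the smallest bucket index is 0
        # (valid indices for the relative_attention_bias embedding).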
|
|
|
|
|
relative_position = ( |
|
torch.arange(self.max_positions, dtype=torch.long)[None, :] |
|
- torch.arange(self.max_positions, dtype=torch.long)[:, None] |
|
) |
|
self.rp_bucket = self.relative_position_bucket( |
|
relative_position, |
|
num_buckets=self.relative_attention_num_buckets, |
|
max_distance=self.relative_attention_max_distance |
|
) |
|
self.rp_bucket -= self.rp_bucket.min() |
|
|
|
self.head_dim = self.d_model // self.n_heads |
|
self.scaling = self.head_dim ** -0.5 |
|
|
|
def prune_heads(self, heads): |
|
if len(heads) == 0: |
|
return |
|
heads, index = find_pruneable_heads_and_indices( |
|
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads |
|
) |
|
|
|
self.q = prune_linear_layer(self.q, index) |
|
self.k = prune_linear_layer(self.k, index) |
|
self.v = prune_linear_layer(self.v, index) |
|
self.o = prune_linear_layer(self.o, index, dim=1) |
|
|
|
self.n_heads = self.n_heads - len(heads) |
|
self.inner_dim = self.key_value_proj_dim * self.n_heads |
|
self.pruned_heads = self.pruned_heads.union(heads) |
|
|
|
@staticmethod |
|
def relative_position_bucket(relative_position, num_buckets=32, max_distance=128): |
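        # Fairseq-style signed bucketing: half the buckets per direction, exact
        # buckets for small distances and logarithmically coarser bins up to
        # max_distance; the sign keeps left and right context distinct.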
|
sign = torch.sign(relative_position) |
|
num_buckets //= 2 |
|
n = torch.abs(relative_position) |
|
|
|
|
|
max_exact = num_buckets // 2 |
|
is_small = n < max_exact |
|
max_bucket_val = num_buckets - 1 - max_exact |
|
|
|
val_if_large = max_exact + torch.ceil( |
|
torch.log(n.float() / max_exact) |
|
/ math.log((max_distance - 1) / max_exact) |
|
* max_bucket_val |
|
).long() |
|
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) |
|
ret = torch.where(is_small, n, val_if_large) * sign |
|
return ret |
|
|
|
def compute_bias(self, query_length, key_length): |
|
relative_position_bucket = self.rp_bucket[:query_length, :key_length] |
|
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) |
|
values = self.relative_attention_bias(relative_position_bucket) |
|
values = values.permute([2, 0, 1]).unsqueeze(0) |
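        # shape (1, num_heads, query_length, key_length)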
|
return values |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
mask=None, |
|
key_value_states=None, |
|
position_bias=None, |
|
past_key_value=None, |
|
layer_head_mask=None, |
|
query_length=None, |
|
use_cache=False, |
|
output_attentions=False, |
|
key_padding_mask=None, |
|
): |
|
""" |
|
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). |
|
""" |
|
|
|
|
|
|
|
batch_size, seq_length = hidden_states.shape[:2] |
|
|
|
int_seq_length = int(seq_length) |
|
|
|
real_seq_length = seq_length |
|
|
|
if past_key_value is not None: |
|
assert ( |
|
len(past_key_value) == 2 |
|
), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states" |
|
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length |
|
|
|
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] |
|
|
|
def shape(states): |
|
"""projection""" |
|
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) |
|
|
|
def unshape(states): |
|
"""reshape""" |
|
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) |
|
|
|
def project(hidden_states, proj_layer, key_value_states, past_key_value): |
|
"""projects hidden states correctly to key/query states""" |
|
if key_value_states is None: |
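                # self-attention: project the current hidden states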
|
|
|
|
|
hidden_states = shape(proj_layer(hidden_states)) |
|
elif past_key_value is None: |
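                # cross-attention: project the encoder hidden states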
|
|
|
|
|
hidden_states = shape(proj_layer(key_value_states)) |
|
|
|
if past_key_value is not None: |
|
if key_value_states is None: |
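                    # self-attention: append the current key/value states to the cache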
|
|
|
|
|
hidden_states = torch.cat([past_key_value, hidden_states], dim=2) |
|
else: |
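                    # cross-attention: the cached encoder key/value states can be reused as-is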
|
|
|
hidden_states = past_key_value |
|
return hidden_states |
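
        # Fairseq scales the queries by 1/sqrt(head_dim); vanilla T5 omits this scaling.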
|
|
|
|
|
query_states = shape(self.q(hidden_states)) * self.scaling |
|
|
|
|
|
key_states = project( |
|
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None |
|
) |
|
value_states = project( |
|
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None |
|
) |
|
|
|
|
|
scores = torch.matmul( |
|
query_states, key_states.transpose(3, 2) |
|
) |
|
|
|
if position_bias is None: |
|
if not self.has_relative_attention_bias: |
|
position_bias = torch.zeros( |
|
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype |
|
) |
|
if self.gradient_checkpointing and self.training: |
|
position_bias.requires_grad = True |
|
else: |
|
position_bias = self.compute_bias(real_seq_length, key_length) |
|
|
|
|
|
|
|
if past_key_value is not None: |
|
position_bias = position_bias[:, :, -int_seq_length:, :] |
|
|
|
if mask is not None: |
|
position_bias = position_bias + mask |
|
|
|
scores += position_bias |
|
|
|
if key_padding_mask is not None: |
|
scores = scores.masked_fill( |
|
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), |
|
float("-inf"), |
|
) |
|
|
|
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( |
|
scores |
|
) |
|
attn_weights = nn.functional.dropout( |
|
attn_weights, p=self.dropout, training=self.training |
|
) |
|
|
|
|
|
if layer_head_mask is not None: |
|
attn_weights = attn_weights * layer_head_mask |
|
|
|
attn_output = unshape(torch.matmul(attn_weights, value_states)) |
|
attn_output = self.o(attn_output) |
|
|
|
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None |
|
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) |
|
|
|
if output_attentions: |
|
outputs = outputs + (attn_weights,) |
|
return outputs |
|
|
|
|
|
class T5LayerSelfAttention(nn.Module): |
|
def __init__(self, config, has_relative_attention_bias=False, normalize_before=False): |
|
super().__init__() |
|
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias) |
|
|
|
self.layer_norm = FST5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
|
self.normalize_before = normalize_before |
|
self.has_relative_attention_bias = has_relative_attention_bias |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask=None, |
|
position_bias=None, |
|
layer_head_mask=None, |
|
past_key_value=None, |
|
use_cache=False, |
|
output_attentions=False, |
|
key_padding_mask=None, |
|
): |
|
if self.normalize_before: |
|
normed_hidden_states = self.layer_norm(hidden_states) |
|
else: |
|
normed_hidden_states = hidden_states |
|
|
|
attention_output = self.SelfAttention( |
|
normed_hidden_states, |
|
mask=attention_mask, |
|
position_bias=position_bias, |
|
layer_head_mask=layer_head_mask, |
|
past_key_value=past_key_value, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
key_padding_mask=key_padding_mask, |
|
) |
|
hidden_states = hidden_states + self.dropout(attention_output[0]) |
|
|
|
if not self.normalize_before: |
|
hidden_states = self.layer_norm(hidden_states) |
|
|
|
outputs = (hidden_states,) + attention_output[1:] |
|
return outputs |
|
|
|
|
|
class T5LayerCrossAttention(nn.Module): |
|
def __init__(self, config, normalize_before=False): |
|
super().__init__() |
|
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) |
|
|
|
self.layer_norm = FST5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
|
|
|
self.normalize_before = normalize_before |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
key_value_states, |
|
attention_mask=None, |
|
position_bias=None, |
|
layer_head_mask=None, |
|
past_key_value=None, |
|
use_cache=False, |
|
query_length=None, |
|
output_attentions=False, |
|
): |
|
if self.normalize_before: |
|
normed_hidden_states = self.layer_norm(hidden_states) |
|
else: |
|
normed_hidden_states = hidden_states |
|
|
|
attention_output = self.EncDecAttention( |
|
normed_hidden_states, |
|
mask=attention_mask, |
|
key_value_states=key_value_states, |
|
position_bias=position_bias, |
|
layer_head_mask=layer_head_mask, |
|
past_key_value=past_key_value, |
|
use_cache=use_cache, |
|
query_length=query_length, |
|
output_attentions=output_attentions, |
|
) |
|
layer_output = hidden_states + self.dropout(attention_output[0]) |
|
|
|
if not self.normalize_before: |
|
layer_output = self.layer_norm(layer_output) |
|
|
|
outputs = (layer_output,) + attention_output[1:] |
|
return outputs |
|
|
|
|
|
class T5Block(nn.Module): |
|
def __init__(self, config, has_relative_attention_bias=False): |
|
super().__init__() |
|
self.is_decoder = config.is_decoder |
|
self.layer = nn.ModuleList() |
|
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) |
|
if self.is_decoder: |
|
self.layer.append(T5LayerCrossAttention(config)) |
|
|
|
self.layer.append(T5LayerFF(config)) |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
attention_mask=None, |
|
position_bias=None, |
|
encoder_hidden_states=None, |
|
encoder_attention_mask=None, |
|
encoder_decoder_position_bias=None, |
|
layer_head_mask=None, |
|
cross_attn_layer_head_mask=None, |
|
past_key_value=None, |
|
use_cache=False, |
|
output_attentions=False, |
|
return_dict=True, |
|
key_padding_mask=None, |
|
): |
|
|
|
if past_key_value is not None: |
|
assert self.is_decoder, "Only decoder can use `past_key_values`" |
|
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 |
|
|
|
if len(past_key_value) != expected_num_past_key_values: |
|
raise ValueError( |
|
f"There should be {expected_num_past_key_values} past states. " |
|
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" |
|
f"Got {len(past_key_value)} past key / value states" |
|
) |
|
|
|
self_attn_past_key_value = past_key_value[:2] |
|
cross_attn_past_key_value = past_key_value[2:] |
|
else: |
|
self_attn_past_key_value, cross_attn_past_key_value = None, None |
|
|
|
self_attention_outputs = self.layer[0]( |
|
hidden_states, |
|
attention_mask=attention_mask, |
|
position_bias=position_bias, |
|
layer_head_mask=layer_head_mask, |
|
past_key_value=self_attn_past_key_value, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
key_padding_mask=key_padding_mask, |
|
) |
|
hidden_states, present_key_value_state = self_attention_outputs[:2] |
|
attention_outputs = self_attention_outputs[2:] |
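
        # clamp fp16 activations that overflowed to inf so downstream ops stay finite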
|
|
|
|
|
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): |
|
clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
|
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
|
do_cross_attention = self.is_decoder and encoder_hidden_states is not None |
|
if do_cross_attention: |
|
|
|
|
|
if present_key_value_state is not None: |
|
query_length = present_key_value_state[0].shape[2] |
|
else: |
|
query_length = None |
|
|
|
cross_attention_outputs = self.layer[1]( |
|
hidden_states, |
|
key_value_states=encoder_hidden_states, |
|
attention_mask=encoder_attention_mask, |
|
position_bias=encoder_decoder_position_bias, |
|
layer_head_mask=cross_attn_layer_head_mask, |
|
past_key_value=cross_attn_past_key_value, |
|
query_length=query_length, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
) |
|
hidden_states = cross_attention_outputs[0] |
|
|
|
|
|
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): |
|
clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
|
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
|
|
|
if present_key_value_state is not None: |
|
present_key_value_state = present_key_value_state + cross_attention_outputs[1] |
|
|
|
|
|
attention_outputs = attention_outputs + cross_attention_outputs[2:] |
|
|
|
|
|
hidden_states = self.layer[-1](hidden_states) |
|
|
|
|
|
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): |
|
clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
|
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) |
|
|
|
outputs = (hidden_states,) |
|
|
|
if use_cache: |
|
outputs = outputs + (present_key_value_state,) + attention_outputs |
|
else: |
|
outputs = outputs + attention_outputs |
|
|
|
return outputs |
|
|
|
|
|
class FairseqT5PreTrainedModel(PreTrainedModel): |
|
""" |
|
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
|
models. |
|
""" |
|
|
|
config_class = FairseqT5Config |
|
load_tf_weights = None |
|
base_model_prefix = "transformer" |
|
is_parallelizable = True |
|
supports_gradient_checkpointing = True |
|
|
|
@property |
|
def dummy_inputs(self): |
|
input_ids = torch.tensor(DUMMY_INPUTS) |
|
input_mask = torch.tensor(DUMMY_MASK) |
|
dummy_inputs = { |
|
"decoder_input_ids": input_ids, |
|
"input_ids": input_ids, |
|
"decoder_attention_mask": input_mask, |
|
} |
|
return dummy_inputs |
|
|
|
def _init_weights(self, module): |
|
"""Initialize the weights""" |
|
factor = self.config.initializer_factor |
|
if isinstance(module, T5LayerNorm) or isinstance(module, torch.nn.LayerNorm): |
|
module.weight.data.fill_(factor * 1.0) |
|
elif isinstance(module, (FairseqT5Model, FairseqT5ForConditionalGeneration, FairseqT5EncoderModel)): |
|
|
|
|
|
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) |
|
elif isinstance(module, T5DenseReluDense): |
|
|
|
|
|
|
|
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) |
|
if hasattr(module.wi, "bias") and module.wi.bias is not None: |
|
module.wi.bias.data.zero_() |
|
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) |
|
if hasattr(module.wo, "bias") and module.wo.bias is not None: |
|
module.wo.bias.data.zero_() |
|
elif isinstance(module, T5DenseGatedGeluDense): |
|
module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) |
|
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: |
|
module.wi_0.bias.data.zero_() |
|
module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) |
|
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: |
|
module.wi_1.bias.data.zero_() |
|
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) |
|
if hasattr(module.wo, "bias") and module.wo.bias is not None: |
|
module.wo.bias.data.zero_() |
|
elif isinstance(module, T5Attention): |
|
|
|
|
|
d_model = self.config.d_model |
|
key_value_proj_dim = self.config.d_kv |
|
n_heads = self.config.num_heads |
|
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) |
|
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) |
|
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) |
|
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) |
|
if module.has_relative_attention_bias: |
|
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) |
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
if isinstance(module, (T5Attention, FairseqT5Stack)): |
|
module.gradient_checkpointing = value |
|
|
|
def _shift_right(self, input_ids): |
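        # Prepend decoder_start_token_id and drop the last label, e.g.
        # labels [a, b, c] -> decoder inputs [start, a, b].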
|
decoder_start_token_id = self.config.decoder_start_token_id |
|
pad_token_id = self.config.pad_token_id |
|
|
|
assert ( |
|
decoder_start_token_id is not None |
|
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information" |
|
|
|
|
|
if is_torch_fx_proxy(input_ids): |
|
|
|
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) |
|
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) |
|
else: |
|
shifted_input_ids = input_ids.new_zeros(input_ids.shape) |
|
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() |
|
shifted_input_ids[..., 0] = decoder_start_token_id |
|
|
|
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." |
|
|
|
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) |
|
|
|
        assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
|
|
|
return shifted_input_ids |
|
|
|
|
|
class FairseqT5Stack(FairseqT5PreTrainedModel): |
|
def __init__(self, config, embed_tokens=None): |
|
super().__init__(config) |
|
|
|
self.embed_tokens = embed_tokens |
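        # Learned (fairseq-style) positional embeddings; the table size is
        # hard-coded to 1024 positions.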
|
self.pos_embed = PositionalEmbedding( |
|
1024, |
|
config.d_model, |
|
config.pad_token_id, |
|
) |
|
self.is_decoder = config.is_decoder |
|
|
|
|
|
self.first_layer_norm = FST5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) |
|
self.dropout = nn.Dropout(config.dropout_rate) |
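
        # Unlike vanilla T5 (relative bias only in the first block), every encoder
        # block here has its own relative attention bias; decoder blocks have none.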
|
|
|
|
|
if not self.is_decoder: |
|
self.block = nn.ModuleList( |
|
|
|
[T5Block(config, has_relative_attention_bias=True) for i in range(config.num_layers)] |
|
) |
|
else: |
|
self.block = nn.ModuleList( |
|
|
|
[T5Block(config, has_relative_attention_bias=False) for i in range(config.num_layers)] |
|
) |
|
|
|
self.init_weights() |
|
|
|
self.model_parallel = False |
|
self.device_map = None |
|
self.gradient_checkpointing = False |
|
|
|
self.padding_idx = self.config.pad_token_id |
|
|
|
def parallelize(self, device_map=None): |
|
|
|
self.device_map = ( |
|
get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map |
|
) |
|
assert_device_map(self.device_map, len(self.block)) |
|
self.model_parallel = True |
|
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) |
|
self.last_device = "cuda:" + str(max(self.device_map.keys())) |
|
|
|
for k, v in self.device_map.items(): |
|
for layer in v: |
|
cuda_device = "cuda:" + str(k) |
|
self.block[layer] = self.block[layer].to(cuda_device) |
|
|
|
|
|
self.embed_tokens = self.embed_tokens.to(self.first_device) |
|
self.pos_embed = self.pos_embed.to(self.first_device) |
|
|
|
self.first_layer_norm = self.first_layer_norm.to(self.first_device) |
|
|
|
def deparallelize(self): |
|
self.model_parallel = False |
|
self.device_map = None |
|
self.first_device = "cpu" |
|
self.last_device = "cpu" |
|
for i in range(len(self.block)): |
|
self.block[i] = self.block[i].to("cpu") |
|
self.embed_tokens = self.embed_tokens.to("cpu") |
|
self.first_layer_norm = self.first_layer_norm.to("cpu") |
|
torch.cuda.empty_cache() |
|
|
|
def get_input_embeddings(self): |
|
return self.embed_tokens |
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
self.embed_tokens = new_embeddings |
|
|
|
def forward( |
|
self, |
|
input_ids=None, |
|
attention_mask=None, |
|
encoder_hidden_states=None, |
|
encoder_attention_mask=None, |
|
inputs_embeds=None, |
|
head_mask=None, |
|
cross_attn_head_mask=None, |
|
past_key_values=None, |
|
use_cache=None, |
|
output_attentions=None, |
|
output_hidden_states=None, |
|
return_dict=None, |
|
pos_offset=0, |
|
): |
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(self.first_device) |
|
self.embed_tokens = self.embed_tokens.to(self.first_device) |
|
self.pos_embed = self.pos_embed.to(self.first_device) |
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
output_hidden_states = ( |
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
) |
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
err_msg_prefix = "decoder_" if self.is_decoder else "" |
|
raise ValueError( |
|
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" |
|
) |
|
elif input_ids is not None: |
|
input_shape = input_ids.size() |
|
input_ids = input_ids.view(-1, input_shape[-1]) |
|
elif inputs_embeds is not None: |
|
input_shape = inputs_embeds.size()[:-1] |
|
else: |
|
err_msg_prefix = "decoder_" if self.is_decoder else "" |
|
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") |
|
|
|
if inputs_embeds is None: |
|
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" |
|
inputs_embeds = self.embed_tokens(input_ids) |
|
|
|
batch_size, seq_length = input_shape |
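
        # required mask length: cached key/value length plus the current input length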
|
|
|
|
|
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length |
|
|
|
if use_cache is True: |
|
assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder" |
|
|
|
if attention_mask is None: |
|
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) |
|
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: |
|
encoder_seq_length = encoder_hidden_states.shape[1] |
|
encoder_attention_mask = torch.ones( |
|
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long |
|
) |
|
|
|
|
|
if past_key_values is None: |
|
past_key_values = [None] * len(self.block) |
|
|
|
|
|
|
|
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device) |
|
|
|
|
|
|
|
if self.is_decoder and encoder_attention_mask is not None: |
|
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) |
|
else: |
|
encoder_extended_attention_mask = None |
|
|
|
|
|
head_mask = self.get_head_mask(head_mask, self.config.num_layers) |
|
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) |
|
present_key_value_states = () if use_cache else None |
|
all_hidden_states = () if output_hidden_states else None |
|
all_attentions = () if output_attentions else None |
|
all_cross_attentions = () if (output_attentions and self.is_decoder) else None |
|
position_bias = None |
|
encoder_decoder_position_bias = None |
|
|
|
|
|
|
|
|
|
        # make_positions needs the token ids to locate padding, so the learned
        # positional embeddings require `input_ids`.
        assert input_ids is not None, "FairseqT5Stack needs `input_ids` to compute positional embeddings"
        pos_embeds = self.pos_embed(input_ids, offset=pos_offset)
        inputs_embeds = inputs_embeds + pos_embeds
|
|
|
|
|
hidden_states = self.first_layer_norm(inputs_embeds) |
|
hidden_states = self.dropout(hidden_states) |
|
|
|
        key_padding_mask: Optional[Tensor] = None
        if self.is_decoder and input_ids is not None:
            padding_mask = input_ids.eq(self.padding_idx)
            if padding_mask.any():
                key_padding_mask = padding_mask
|
|
|
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): |
|
layer_head_mask = head_mask[i] |
|
cross_attn_layer_head_mask = cross_attn_head_mask[i] |
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(hidden_states.device) |
|
|
|
if attention_mask is not None: |
|
attention_mask = attention_mask.to(hidden_states.device) |
|
if position_bias is not None: |
|
position_bias = position_bias.to(hidden_states.device) |
|
if encoder_hidden_states is not None: |
|
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) |
|
if encoder_extended_attention_mask is not None: |
|
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) |
|
if encoder_decoder_position_bias is not None: |
|
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) |
|
if layer_head_mask is not None: |
|
layer_head_mask = layer_head_mask.to(hidden_states.device) |
|
if cross_attn_layer_head_mask is not None: |
|
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) |
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
if self.gradient_checkpointing and self.training: |
|
if use_cache: |
|
                    logger.warning(
|
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
|
) |
|
use_cache = False |
|
|
|
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # `use_cache`, `output_attentions` and `key_padding_mask` are
                        # captured from the enclosing scope: checkpoint() does not
                        # forward keyword arguments to the wrapped function.
                        return tuple(
                            module(
                                *inputs,
                                use_cache=use_cache,
                                output_attentions=output_attentions,
                                key_padding_mask=key_padding_mask,
                            )
                        )

                    return custom_forward

                layer_outputs = checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    extended_attention_mask,
                    position_bias,
                    encoder_hidden_states,
                    encoder_extended_attention_mask,
                    encoder_decoder_position_bias,
                    layer_head_mask,
                    cross_attn_layer_head_mask,
                    None,  # past_key_value is not supported with gradient checkpointing
                )
|
else: |
|
layer_outputs = layer_module( |
|
hidden_states, |
|
attention_mask=extended_attention_mask, |
|
position_bias=position_bias, |
|
encoder_hidden_states=encoder_hidden_states, |
|
encoder_attention_mask=encoder_extended_attention_mask, |
|
encoder_decoder_position_bias=encoder_decoder_position_bias, |
|
layer_head_mask=layer_head_mask, |
|
cross_attn_layer_head_mask=cross_attn_layer_head_mask, |
|
past_key_value=past_key_value, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
key_padding_mask=key_padding_mask, |
|
) |
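
            # When use_cache is False the block returns no present state; insert a None
            # placeholder so the tuple layout stays uniform for the indexing below.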
|
|
|
|
|
|
|
if use_cache is False: |
|
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] |
|
|
|
hidden_states, present_key_value_state = layer_outputs[:2] |
|
|
|
|
|
|
|
|
|
|
|
if self.is_decoder and encoder_hidden_states is not None: |
|
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] |
|
|
|
if use_cache: |
|
present_key_value_states = present_key_value_states + (present_key_value_state,) |
|
|
|
if output_attentions: |
|
all_attentions = all_attentions + (layer_outputs[3],) |
|
if self.is_decoder: |
|
all_cross_attentions = all_cross_attentions + (layer_outputs[5],) |
|
|
|
|
|
if self.model_parallel: |
|
for k, v in self.device_map.items(): |
|
if i == v[-1] and "cuda:" + str(k) != self.last_device: |
|
hidden_states = hidden_states.to("cuda:" + str(k + 1)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
if not return_dict: |
|
return tuple( |
|
v |
|
for v in [ |
|
hidden_states, |
|
present_key_value_states, |
|
all_hidden_states, |
|
all_attentions, |
|
all_cross_attentions, |
|
] |
|
if v is not None |
|
) |
|
return BaseModelOutputWithPastAndCrossAttentions( |
|
last_hidden_state=hidden_states, |
|
past_key_values=present_key_value_states, |
|
hidden_states=all_hidden_states, |
|
attentions=all_attentions, |
|
cross_attentions=all_cross_attentions, |
|
) |
|
|
|
|
|
class FairseqT5Model(FairseqT5PreTrainedModel): |
|
_keys_to_ignore_on_load_missing = [ |
|
r"encoder\.embed_tokens\.weight", |
|
r"decoder\.embed_tokens\.weight", |
|
] |
|
_keys_to_ignore_on_load_unexpected = [ |
|
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", |
|
] |
|
|
|
def __init__(self, config: FairseqT5Config): |
|
super().__init__(config) |
|
self.shared = nn.Embedding(config.vocab_size, config.d_model) |
|
|
|
encoder_config = copy.deepcopy(config) |
|
encoder_config.is_decoder = False |
|
encoder_config.use_cache = False |
|
encoder_config.is_encoder_decoder = False |
|
self.encoder = FairseqT5Stack(encoder_config, self.shared) |
|
|
|
decoder_config = copy.deepcopy(config) |
|
decoder_config.is_decoder = True |
|
decoder_config.is_encoder_decoder = False |
|
decoder_config.num_layers = config.num_decoder_layers |
|
self.decoder = FairseqT5Stack(decoder_config, self.shared) |
|
|
|
|
|
self.init_weights() |
|
|
|
|
|
self.model_parallel = False |
|
self.device_map = None |
|
|
|
def parallelize(self, device_map=None): |
|
self.device_map = ( |
|
get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) |
|
if device_map is None |
|
else device_map |
|
) |
|
assert_device_map(self.device_map, len(self.encoder.block)) |
|
self.encoder.parallelize(self.device_map) |
|
self.decoder.parallelize(self.device_map) |
|
self.model_parallel = True |
|
|
|
def deparallelize(self): |
|
self.encoder.deparallelize() |
|
self.decoder.deparallelize() |
|
self.encoder = self.encoder.to("cpu") |
|
self.decoder = self.decoder.to("cpu") |
|
self.model_parallel = False |
|
self.device_map = None |
|
torch.cuda.empty_cache() |
|
|
|
def get_input_embeddings(self): |
|
return self.shared |
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
self.shared = new_embeddings |
|
self.encoder.set_input_embeddings(new_embeddings) |
|
self.decoder.set_input_embeddings(new_embeddings) |
|
|
|
def get_encoder(self): |
|
return self.encoder |
|
|
|
def get_decoder(self): |
|
return self.decoder |
|
|
|
def _prune_heads(self, heads_to_prune): |
|
""" |
|
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base |
|
class PreTrainedModel |
|
""" |
|
for layer, heads in heads_to_prune.items(): |
|
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
|
|
|
def forward( |
|
self, |
|
input_ids=None, |
|
attention_mask=None, |
|
decoder_input_ids=None, |
|
decoder_attention_mask=None, |
|
head_mask=None, |
|
decoder_head_mask=None, |
|
cross_attn_head_mask=None, |
|
encoder_outputs=None, |
|
past_key_values=None, |
|
inputs_embeds=None, |
|
decoder_inputs_embeds=None, |
|
use_cache=None, |
|
output_attentions=None, |
|
output_hidden_states=None, |
|
return_dict=None, |
|
): |
|
r""" |
|
Returns: Seq2SeqModelOutput |
|
|
|
Example: |
|
|
|
```python |
|
        >>> from transformers import T5Tokenizer
|
|
|
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small") |
|
>>> model = FairseqT5Model.from_pretrained("t5-small") |
|
|
|
>>> input_ids = tokenizer( |
|
... "Studies have been shown that owning a dog is good for you", return_tensors="pt" |
|
        ... ).input_ids  # Batch size 1
|
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 |
|
|
|
>>> # forward pass |
|
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) |
|
>>> last_hidden_states = outputs.last_hidden_state |
|
```""" |
|
        # Caching is force-disabled in the bare model; FairseqT5ForConditionalGeneration
        # manages `use_cache` during generation.
        use_cache = False
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
if head_mask is not None and decoder_head_mask is None: |
|
if self.config.num_layers == self.config.num_decoder_layers: |
|
decoder_head_mask = head_mask |
|
|
|
|
|
if encoder_outputs is None: |
|
encoder_outputs = self.encoder( |
|
input_ids=input_ids, |
|
attention_mask=attention_mask, |
|
inputs_embeds=inputs_embeds, |
|
head_mask=head_mask, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): |
|
encoder_outputs = BaseModelOutput( |
|
last_hidden_state=encoder_outputs[0], |
|
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, |
|
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, |
|
) |
|
|
|
hidden_states = encoder_outputs[0] |
|
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(self.decoder.first_device) |
|
hidden_states = hidden_states.to(self.decoder.first_device) |
|
if decoder_input_ids is not None: |
|
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) |
|
if attention_mask is not None: |
|
attention_mask = attention_mask.to(self.decoder.first_device) |
|
if decoder_attention_mask is not None: |
|
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) |
|
|
|
|
|
decoder_outputs = self.decoder( |
|
input_ids=decoder_input_ids, |
|
attention_mask=decoder_attention_mask, |
|
inputs_embeds=decoder_inputs_embeds, |
|
past_key_values=past_key_values, |
|
encoder_hidden_states=hidden_states, |
|
encoder_attention_mask=attention_mask, |
|
head_mask=decoder_head_mask, |
|
cross_attn_head_mask=cross_attn_head_mask, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
|
|
if not return_dict: |
|
return decoder_outputs + encoder_outputs |
|
|
|
return Seq2SeqModelOutput( |
|
last_hidden_state=decoder_outputs.last_hidden_state, |
|
past_key_values=decoder_outputs.past_key_values, |
|
decoder_hidden_states=decoder_outputs.hidden_states, |
|
decoder_attentions=decoder_outputs.attentions, |
|
cross_attentions=decoder_outputs.cross_attentions, |
|
encoder_last_hidden_state=encoder_outputs.last_hidden_state, |
|
encoder_hidden_states=encoder_outputs.hidden_states, |
|
encoder_attentions=encoder_outputs.attentions, |
|
) |
|
|
|
|
|
class FairseqT5ForConditionalGeneration(FairseqT5PreTrainedModel): |
|
_keys_to_ignore_on_load_missing = [ |
|
r"encoder\.embed_tokens\.weight", |
|
r"decoder\.embed_tokens\.weight", |
|
r"lm_head\.weight", |
|
] |
|
_keys_to_ignore_on_load_unexpected = [ |
|
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", |
|
] |
|
|
|
def __init__(self, config): |
|
super().__init__(config) |
|
self.model_dim = config.d_model |
|
|
|
self.shared = nn.Embedding(config.vocab_size, config.d_model) |
|
|
|
encoder_config = copy.deepcopy(config) |
|
encoder_config.is_decoder = False |
|
encoder_config.use_cache = False |
|
encoder_config.is_encoder_decoder = False |
|
self.encoder = FairseqT5Stack(encoder_config, self.shared) |
|
|
|
decoder_config = copy.deepcopy(config) |
|
decoder_config.is_decoder = True |
|
decoder_config.is_encoder_decoder = False |
|
decoder_config.num_layers = config.num_decoder_layers |
|
self.decoder = FairseqT5Stack(decoder_config, self.shared) |
|
|
|
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) |
|
|
|
|
|
self.init_weights() |
|
|
|
|
|
self.model_parallel = False |
|
self.device_map = None |
|
|
|
def parallelize(self, device_map=None): |
|
self.device_map = ( |
|
get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) |
|
if device_map is None |
|
else device_map |
|
) |
|
assert_device_map(self.device_map, len(self.encoder.block)) |
|
self.encoder.parallelize(self.device_map) |
|
self.decoder.parallelize(self.device_map) |
|
self.lm_head = self.lm_head.to(self.decoder.first_device) |
|
self.model_parallel = True |
|
|
|
def deparallelize(self): |
|
self.encoder.deparallelize() |
|
self.decoder.deparallelize() |
|
self.encoder = self.encoder.to("cpu") |
|
self.decoder = self.decoder.to("cpu") |
|
self.lm_head = self.lm_head.to("cpu") |
|
self.model_parallel = False |
|
self.device_map = None |
|
torch.cuda.empty_cache() |
|
|
|
def get_input_embeddings(self): |
|
return self.shared |
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
self.shared = new_embeddings |
|
self.encoder.set_input_embeddings(new_embeddings) |
|
self.decoder.set_input_embeddings(new_embeddings) |
|
|
|
def set_output_embeddings(self, new_embeddings): |
|
self.lm_head = new_embeddings |
|
|
|
def get_output_embeddings(self): |
|
return self.lm_head |
|
|
|
def get_encoder(self): |
|
return self.encoder |
|
|
|
def get_decoder(self): |
|
return self.decoder |
|
|
|
def forward( |
|
self, |
|
input_ids=None, |
|
attention_mask=None, |
|
decoder_input_ids=None, |
|
decoder_attention_mask=None, |
|
head_mask=None, |
|
decoder_head_mask=None, |
|
cross_attn_head_mask=None, |
|
encoder_outputs=None, |
|
past_key_values=None, |
|
inputs_embeds=None, |
|
decoder_inputs_embeds=None, |
|
labels=None, |
|
use_cache=None, |
|
output_attentions=None, |
|
output_hidden_states=None, |
|
return_dict=None, |
|
pos_offset=0, |
|
): |
|
r""" |
|
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
            labels in `[0, ..., config.vocab_size - 1]`
|
|
|
Returns: Seq2SeqLMOutput |
|
|
|
Examples: |
|
|
|
```python |
|
        >>> from transformers import T5Tokenizer
|
|
|
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small") |
|
>>> model = FairseqT5ForConditionalGeneration.from_pretrained("t5-small") |
|
|
|
>>> # training |
|
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids |
|
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids |
|
>>> outputs = model(input_ids=input_ids, labels=labels) |
|
>>> loss = outputs.loss |
|
>>> logits = outputs.logits |
|
|
|
>>> # inference |
|
>>> input_ids = tokenizer( |
|
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" |
|
        ... ).input_ids  # Batch size 1
|
>>> outputs = model.generate(input_ids) |
|
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) |
|
>>> # studies have shown that owning a dog is good for you. |
|
```""" |
|
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
if head_mask is not None and decoder_head_mask is None: |
|
if self.config.num_layers == self.config.num_decoder_layers: |
|
decoder_head_mask = head_mask |
|
|
|
|
|
if encoder_outputs is None: |
|
|
|
encoder_outputs = self.encoder( |
|
input_ids=input_ids, |
|
attention_mask=attention_mask, |
|
inputs_embeds=inputs_embeds, |
|
head_mask=head_mask, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): |
|
encoder_outputs = BaseModelOutput( |
|
last_hidden_state=encoder_outputs[0], |
|
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, |
|
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, |
|
) |
|
|
|
hidden_states = encoder_outputs[0] |
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(self.decoder.first_device) |
|
|
|
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: |
|
|
|
decoder_input_ids = self._shift_right(labels) |
|
|
|
|
|
|
|
if past_key_values is not None: |
|
assert labels is None, "Decoder should not use cached key value states when training." |
|
if decoder_input_ids is not None: |
|
decoder_input_ids = decoder_input_ids[:, -1:] |
|
if decoder_inputs_embeds is not None: |
|
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:] |
|
|
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(self.decoder.first_device) |
|
hidden_states = hidden_states.to(self.decoder.first_device) |
|
if decoder_input_ids is not None: |
|
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) |
|
if attention_mask is not None: |
|
attention_mask = attention_mask.to(self.decoder.first_device) |
|
if decoder_attention_mask is not None: |
|
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) |
|
|
|
|
|
decoder_outputs = self.decoder( |
|
input_ids=decoder_input_ids, |
|
attention_mask=decoder_attention_mask, |
|
inputs_embeds=decoder_inputs_embeds, |
|
past_key_values=past_key_values, |
|
encoder_hidden_states=hidden_states, |
|
encoder_attention_mask=attention_mask, |
|
head_mask=decoder_head_mask, |
|
cross_attn_head_mask=cross_attn_head_mask, |
|
use_cache=use_cache, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
pos_offset=pos_offset, |
|
) |
|
|
|
sequence_output = decoder_outputs[0] |
|
|
|
|
|
if self.model_parallel: |
|
torch.cuda.set_device(self.encoder.first_device) |
|
self.lm_head = self.lm_head.to(self.encoder.first_device) |
|
sequence_output = sequence_output.to(self.lm_head.weight.device) |
|
|
|
lm_logits = self.lm_head(sequence_output) |
|
|
|
loss = None |
|
if labels is not None: |
|
loss_fct = nn.CrossEntropyLoss(ignore_index=-100) |
|
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) |
|
|
|
|
|
if not return_dict: |
|
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs |
|
return ((loss,) + output) if loss is not None else output |
|
|
|
return Seq2SeqLMOutput( |
|
loss=loss, |
|
logits=lm_logits, |
|
past_key_values=decoder_outputs.past_key_values, |
|
decoder_hidden_states=decoder_outputs.hidden_states, |
|
decoder_attentions=decoder_outputs.attentions, |
|
cross_attentions=decoder_outputs.cross_attentions, |
|
encoder_last_hidden_state=encoder_outputs.last_hidden_state, |
|
encoder_hidden_states=encoder_outputs.hidden_states, |
|
encoder_attentions=encoder_outputs.attentions, |
|
) |
|
|
|
def prepare_inputs_for_generation( |
|
self, |
|
input_ids, |
|
past=None, |
|
attention_mask=None, |
|
head_mask=None, |
|
decoder_head_mask=None, |
|
cross_attn_head_mask=None, |
|
use_cache=None, |
|
encoder_outputs=None, |
|
**kwargs |
|
): |
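
        # When the cache is used only the last token is fed to the decoder, so its
        # learned position must be offset by the number of tokens already generated.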
|
|
|
offset = 0 |
|
if past is not None: |
|
offset = max(0, int(input_ids.size(1)) - 1) |
|
input_ids = input_ids[:, -1:] |
|
|
|
return { |
|
"decoder_input_ids": input_ids, |
|
"past_key_values": past, |
|
"encoder_outputs": encoder_outputs, |
|
"attention_mask": attention_mask, |
|
"head_mask": head_mask, |
|
"decoder_head_mask": decoder_head_mask, |
|
"cross_attn_head_mask": cross_attn_head_mask, |
|
"use_cache": use_cache, |
|
"pos_offset": offset, |
|
} |
|
|
|
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): |
|
return self._shift_right(labels) |
|
|
|
def _reorder_cache(self, past, beam_idx): |
|
|
|
|
|
if past is None: |
|
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") |
|
return past |
|
|
|
reordered_decoder_past = () |
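        # reorder each cached key/value tensor along the batch (beam) dimension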
|
for layer_past_states in past: |
|
|
|
|
|
reordered_layer_past_states = () |
|
for layer_past_state in layer_past_states: |
|
|
|
reordered_layer_past_states = reordered_layer_past_states + ( |
|
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), |
|
) |
|
|
|
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape |
|
assert len(reordered_layer_past_states) == len(layer_past_states) |
|
|
|
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) |
|
return reordered_decoder_past |
|
|
|
|
|
class FairseqT5EncoderModel(FairseqT5PreTrainedModel): |
|
    _keys_to_ignore_on_load_missing = [
|
r"encoder\.embed_tokens\.weight", |
|
] |
|
|
|
def __init__(self, config: FairseqT5Config): |
|
super().__init__(config) |
|
self.shared = nn.Embedding(config.vocab_size, config.d_model) |
|
|
|
encoder_config = copy.deepcopy(config) |
|
encoder_config.is_decoder = False |
|
encoder_config.use_cache = False |
|
encoder_config.is_encoder_decoder = False |
|
self.encoder = FairseqT5Stack(encoder_config, self.shared) |
|
|
|
|
|
self.init_weights() |
|
|
|
|
|
self.model_parallel = False |
|
self.device_map = None |
|
|
|
def parallelize(self, device_map=None): |
|
self.device_map = ( |
|
get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) |
|
if device_map is None |
|
else device_map |
|
) |
|
assert_device_map(self.device_map, len(self.encoder.block)) |
|
self.encoder.parallelize(self.device_map) |
|
self.model_parallel = True |
|
|
|
def deparallelize(self): |
|
self.encoder.deparallelize() |
|
self.encoder = self.encoder.to("cpu") |
|
self.model_parallel = False |
|
self.device_map = None |
|
torch.cuda.empty_cache() |
|
|
|
def get_input_embeddings(self): |
|
return self.shared |
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
self.shared = new_embeddings |
|
self.encoder.set_input_embeddings(new_embeddings) |
|
|
|
def get_encoder(self): |
|
return self.encoder |
|
|
|
def _prune_heads(self, heads_to_prune): |
|
""" |
|
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base |
|
class PreTrainedModel |
|
""" |
|
for layer, heads in heads_to_prune.items(): |
|
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
|
|
|
def forward( |
|
self, |
|
input_ids=None, |
|
attention_mask=None, |
|
head_mask=None, |
|
inputs_embeds=None, |
|
output_attentions=None, |
|
output_hidden_states=None, |
|
return_dict=None, |
|
): |
|
r""" |
|
Returns: BaseModelOutput |
|
|
|
Example: |
|
|
|
```python |
|
        >>> from transformers import T5Tokenizer
|
|
|
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small") |
|
>>> model = FairseqT5EncoderModel.from_pretrained("t5-small") |
|
>>> input_ids = tokenizer( |
|
... "Studies have been shown that owning a dog is good for you", return_tensors="pt" |
|
        ... ).input_ids  # Batch size 1
|
>>> outputs = model(input_ids=input_ids) |
|
>>> last_hidden_states = outputs.last_hidden_state |
|
```""" |
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
encoder_outputs = self.encoder( |
|
input_ids=input_ids, |
|
attention_mask=attention_mask, |
|
inputs_embeds=inputs_embeds, |
|
head_mask=head_mask, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
|
|
return encoder_outputs |
|
|