|
|
import torch |
|
|
from torch.utils.data import Dataset, DataLoader |
|
|
from transformers import MT5ForConditionalGeneration, MT5Tokenizer
from torch.optim import AdamW  # transformers' AdamW was deprecated and later removed; use the torch optimizer
|
|
from transformers import AutoModel, AutoTokenizer |
|
|
from sklearn.metrics.pairwise import cosine_similarity |
|
|
import pandas as pd |
|
|
import matplotlib.pyplot as plt |
|
|
import numpy as np |
|
|
from huggingface_hub import HfApi, HfFolder, Repository, notebook_login, create_repo, upload_folder |
|
|
import os |
|
|
import shutil |
|
|
|
|
|
|
|
|
HF_USERNAME = "aarath97" |
|
|
HF_REPO = "mt5-dogri-translation" |
|
|
MODEL_NAME = "google/mt5-large" |
|
|
BATCH_SIZE = 2 |
|
|
LR = 1e-5 |
|
|
DPO_STEPS = 100 |
|
|
HGRL_STEPS = 100 |
|
|
COMBINED_STEPS = 50 |
|
|
GAMMA = 3.5 |
|
|
ALPHA = 0.5 |
|
|
BETA = 0.5 |
|
|
|
|
|
|
|
|
df = pd.read_excel("dogri_train.xlsx") |
|
|
# NOTE: 'Unpreffered' (sic) matches the column header as it appears in the spreadsheet.
train_data = list(zip(df['Dogri'], df['English'], df['Unpreffered']))
|
|
|
|
|
|
|
|
tokenizer = MT5Tokenizer.from_pretrained(MODEL_NAME) |
|
|
sbert = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2").eval()  # frozen similarity scorer
sbert_tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
|
|
|
|
|
|
|
|
def compute_similarity(sent1, sent2):
    # Mean-pooled MiniLM embeddings scored with cosine similarity. The scorer
    # is frozen, so run it under no_grad. (Plain mean pooling over all
    # positions is adequate here since single sentences carry no padding.)
    with torch.no_grad():
        emb1 = sbert(**sbert_tokenizer(sent1, return_tensors='pt', truncation=True)).last_hidden_state.mean(1)
        emb2 = sbert(**sbert_tokenizer(sent2, return_tensors='pt', truncation=True)).last_hidden_state.mean(1)
    return cosine_similarity(emb1.numpy(), emb2.numpy())[0][0]
|
|
|
|
|
def hyper_gamma_reward(rho):
    # Hyper-gamma shaping: scales the raw similarity rho by exp(-GAMMA * (1 - rho)),
    # so the reward decays sharply as generations drift from the reference.
    return rho * np.exp(-GAMMA * (1 - rho))
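
# Illustrative values with GAMMA = 3.5 (a sanity check, not part of training):
#   rho = 1.00 -> reward = 1.000
#   rho = 0.75 -> reward = 0.75 * exp(-0.875) ≈ 0.313
#   rho = 0.50 -> reward = 0.50 * exp(-1.75)  ≈ 0.087
# so the policy-gradient updates below are dominated by near-correct outputs.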
|
|
|
|
|
|
|
|
class DogriDataset(Dataset): |
|
|
def __init__(self, data): |
|
|
self.data = data |
|
|
|
|
|
def __len__(self): |
|
|
return len(self.data) |
|
|
|
|
|
def __getitem__(self, idx): |
|
|
return self.data[idx] |
|
|
|
|
|
dataloader = DataLoader(DogriDataset(train_data), batch_size=BATCH_SIZE, shuffle=True) |
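
# With the default collate_fn, each batch of (src, ref, unpref) string tuples
# is collated into a list of three tuples, each of length BATCH_SIZE, hence
# the zip(*batch) unpacking inside the training loops below.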
|
|
|
|
|
|
|
|
model = MT5ForConditionalGeneration.from_pretrained(MODEL_NAME).to("cuda") |
|
|
optimizer = AdamW(model.parameters(), lr=LR) |
|
|
|
|
|
dpo_losses, hgrl_losses, final_losses = [], [], [] |
|
|
|
|
|
|
|
|
# Stage 1: preference optimization (DPO-style) on (preferred, dispreferred) pairs.
for step in range(DPO_STEPS):
|
|
    # Re-creating the iterator each step draws one freshly shuffled batch
    # (the same sampling pattern is used in the two later stages).
    batch = next(iter(dataloader))
|
|
loss_batch = [] |
|
|
for src, ref, unpref in zip(*batch): |
|
|
input_ids = tokenizer(src, return_tensors='pt', truncation=True, padding=True).input_ids.to("cuda") |
|
|
ref_ids = tokenizer(ref, return_tensors='pt', truncation=True, padding=True).input_ids.to("cuda") |
|
|
unpref_ids = tokenizer(unpref, return_tensors='pt', truncation=True, padding=True).input_ids.to("cuda") |
|
|
|
|
|
        # model(...).loss is the mean token-level NLL, so -loss is the
        # (length-normalized) log-likelihood of the labels.
        ref_nll = model(input_ids=input_ids, labels=ref_ids).loss
        unpref_nll = model(input_ids=input_ids, labels=unpref_ids).loss

        # Reference-free DPO-style loss: -log sigmoid(beta * (logp_ref - logp_unpref)).
        # Keep everything as tensors: calling .item() here would detach the
        # computation graph and backward() would produce no model gradients.
        beta = 1.0
        loss = -torch.nn.functional.logsigmoid(beta * (unpref_nll - ref_nll))
|
|
loss_batch.append(loss) |
|
|
|
|
|
loss_val = torch.stack(loss_batch).mean() |
|
|
loss_val.backward() |
|
|
optimizer.step() |
|
|
optimizer.zero_grad() |
|
|
dpo_losses.append(loss_val.item()) |
|
|
|
|
|
|
|
|
# Stage 2: hyper-gamma reward learning (HGRL) on the model's own generations.
for step in range(HGRL_STEPS):
|
|
batch = next(iter(dataloader)) |
|
|
loss_batch = [] |
|
|
for src, ref, _ in zip(*batch): |
|
|
input_ids = tokenizer(src, return_tensors='pt').input_ids.to("cuda") |
|
|
        # max_new_tokens is an assumed cap; generate()'s default max_length (20)
        # would truncate most translations.
        gen_ids = model.generate(input_ids, max_new_tokens=128)
|
|
gen_text = tokenizer.decode(gen_ids[0], skip_special_tokens=True) |
|
|
|
|
|
rho = compute_similarity(gen_text, ref) |
|
|
reward = hyper_gamma_reward(rho) |
|
|
|
|
|
        # Score the model's own generation: .loss is the mean NLL, so the
        # REINFORCE-style objective minimizes reward-weighted NLL, which
        # raises the likelihood of high-reward generations.
        labels = tokenizer(gen_text, return_tensors='pt').input_ids.to("cuda")
        nll = model(input_ids=input_ids, labels=labels).loss

        loss = float(reward) * nll
|
|
loss_batch.append(loss) |
|
|
|
|
|
loss_val = torch.stack(loss_batch).mean() |
|
|
loss_val.backward() |
|
|
optimizer.step() |
|
|
optimizer.zero_grad() |
|
|
hgrl_losses.append(loss_val.item()) |
|
|
|
|
|
|
|
|
# Stage 3: combined objective, ALPHA * DPO loss + BETA * HGRL loss.
for step in range(COMBINED_STEPS):
|
|
batch = next(iter(dataloader)) |
|
|
loss_dpo_batch, loss_hgrl_batch = [], [] |
|
|
for src, ref, unpref in zip(*batch): |
|
|
input_ids = tokenizer(src, return_tensors='pt').input_ids.to("cuda") |
|
|
ref_ids = tokenizer(ref, return_tensors='pt').input_ids.to("cuda") |
|
|
unpref_ids = tokenizer(unpref, return_tensors='pt').input_ids.to("cuda") |
|
|
|
|
|
        # Same tensor-valued DPO loss as in stage 1 (no .item(), so gradients flow).
        ref_nll = model(input_ids=input_ids, labels=ref_ids).loss
        unpref_nll = model(input_ids=input_ids, labels=unpref_ids).loss
        dpo_loss = -torch.nn.functional.logsigmoid(unpref_nll - ref_nll)
|
|
loss_dpo_batch.append(dpo_loss) |
|
|
|
|
|
        gen_ids = model.generate(input_ids, max_new_tokens=128)  # assumed cap, as in stage 2
|
|
gen_text = tokenizer.decode(gen_ids[0], skip_special_tokens=True) |
|
|
rho = compute_similarity(gen_text, ref) |
|
|
reward = hyper_gamma_reward(rho) |
|
|
|
|
|
        labels = tokenizer(gen_text, return_tensors='pt').input_ids.to("cuda")
        nll = model(input_ids=input_ids, labels=labels).loss
        hgrl_loss = float(reward) * nll  # reward-weighted NLL, as in stage 2
|
|
loss_hgrl_batch.append(hgrl_loss) |
|
|
|
|
|
loss_dpo_mean = torch.stack(loss_dpo_batch).mean() |
|
|
loss_hgrl_mean = torch.stack(loss_hgrl_batch).mean() |
|
|
    # Joint objective: L = ALPHA * L_DPO + BETA * L_HGRL
    combined_loss = ALPHA * loss_dpo_mean + BETA * loss_hgrl_mean
|
|
combined_loss.backward() |
|
|
optimizer.step() |
|
|
optimizer.zero_grad() |
|
|
final_losses.append(combined_loss.item()) |
|
|
|
|
|
|
|
|
plt.plot(dpo_losses, label="DPO") |
|
|
plt.plot(hgrl_losses, label="HGRL") |
|
|
plt.plot(final_losses, label="Combined") |
|
|
plt.xlabel("Steps") |
|
|
plt.ylabel("Loss") |
|
|
plt.legend() |
|
|
plt.savefig("loss_curve.png") |
|
|
|
|
|
with open("loss_report.txt", "w") as f: |
|
|
f.write("DPO Final Loss: {:.4f}\n".format(dpo_losses[-1])) |
|
|
f.write("HGRL Final Loss: {:.4f}\n".format(hgrl_losses[-1])) |
|
|
f.write("Combined Final Loss: {:.4f}\n".format(final_losses[-1])) |
|
|
|
|
|
|
|
|
test_df = pd.read_excel("in22conv.xlsx") |
|
|
test_outputs = [] |
|
|
model.eval()  # disable dropout for inference
with torch.no_grad():
    for line in test_df.iloc[:, 0].tolist():
        input_ids = tokenizer(line, return_tensors='pt').input_ids.to("cuda")
        outputs = model.generate(input_ids, max_new_tokens=128)
        translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
        test_outputs.append(translation)
|
|
|
|
|
output_df = pd.DataFrame({"Dogri": test_df.iloc[:, 0], "English": test_outputs}) |
|
|
output_df.to_excel("translated_output.xlsx", index=False) |
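

# A minimal sketch of pushing the fine-tuned model to the Hub, making use of
# the huggingface_hub imports at the top. The repo id simply combines
# HF_USERNAME and HF_REPO defined above; the save directory name is assumed.
# This presumes you are already authenticated (e.g. via notebook_login() or a
# stored token).
save_dir = "mt5-dogri-finetuned"  # assumed local output directory
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
create_repo(f"{HF_USERNAME}/{HF_REPO}", exist_ok=True)
upload_folder(repo_id=f"{HF_USERNAME}/{HF_REPO}", folder_path=save_dir)


# ---------------------------------------------------------------------------
# Everything below is the custom IndicTrans implementation bundled with this
# script: first the configuration / ONNX-export code, then the modeling code.
# ---------------------------------------------------------------------------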
|
|
|
|
|
|
|
|
from collections import OrderedDict |
|
|
from typing import Any, Mapping, Optional |
|
|
|
|
|
from transformers import PreTrainedTokenizer |
|
|
from transformers.configuration_utils import PretrainedConfig |
|
|
from transformers.onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast |
|
|
from transformers.onnx.utils import compute_effective_axis_dimension |
|
|
from transformers.utils import TensorType, is_torch_available |
|
|
|
|
|
|
|
|
|
|
|
class IndicTransConfig(PretrainedConfig): |
|
|
r""" |
|
|
This is the configuration class to store the configuration of a [`IT2Model`]. It is used to instantiate an |
|
|
IT2 model according to the specified arguments, defining the model architecture. Instantiating a configuration |
|
|
    with the defaults will yield a similar configuration to that of the IT2 model.
|
|
|
|
|
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the |
|
|
documentation from [`PretrainedConfig`] for more information. |
|
|
|
|
|
|
|
|
Args: |
|
|
vocab_size (`int`, *optional*, defaults to 50265): |
|
|
Vocabulary size of the IT2 model. Defines the number of different tokens that can be represented by the |
|
|
            `input_ids` passed when calling [`IT2Model`].
|
|
d_model (`int`, *optional*, defaults to 1024): |
|
|
Dimensionality of the layers and the pooler layer. |
|
|
encoder_layers (`int`, *optional*, defaults to 12): |
|
|
Number of encoder layers. |
|
|
decoder_layers (`int`, *optional*, defaults to 12): |
|
|
Number of decoder layers. |
|
|
encoder_attention_heads (`int`, *optional*, defaults to 16): |
|
|
Number of attention heads for each attention layer in the Transformer encoder. |
|
|
decoder_attention_heads (`int`, *optional*, defaults to 16): |
|
|
Number of attention heads for each attention layer in the Transformer decoder. |
|
|
decoder_ffn_dim (`int`, *optional*, defaults to 4096): |
|
|
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. |
|
|
encoder_ffn_dim (`int`, *optional*, defaults to 4096): |
|
|
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
|
|
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): |
|
|
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, |
|
|
`"relu"`, `"silu"` and `"gelu_new"` are supported. |
|
|
dropout (`float`, *optional*, defaults to 0.1): |
|
|
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. |
|
|
attention_dropout (`float`, *optional*, defaults to 0.0): |
|
|
The dropout ratio for the attention probabilities. |
|
|
activation_dropout (`float`, *optional*, defaults to 0.0): |
|
|
The dropout ratio for activations inside the fully connected layer. |
|
|
classifier_dropout (`float`, *optional*, defaults to 0.0): |
|
|
The dropout ratio for classifier. |
|
|
max_position_embeddings (`int`, *optional*, defaults to 1024): |
|
|
The maximum sequence length that this model might ever be used with. Typically set this to something large |
|
|
just in case (e.g., 512 or 1024 or 2048). |
|
|
init_std (`float`, *optional*, defaults to 0.02): |
|
|
The standard deviation of the truncated_normal_initializer for initializing all weight matrices. |
|
|
encoder_layerdrop (`float`, *optional*, defaults to 0.0): |
|
|
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
|
|
for more details. |
|
|
decoder_layerdrop (`float`, *optional*, defaults to 0.0): |
|
|
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
|
|
for more details. |
|
|
use_cache (`bool`, *optional*, defaults to `True`): |
|
|
Whether or not the model should return the last key/values attentions (not used by all models). |
|
|
    """
|
|
model_type = "IndicTrans" |
|
|
keys_to_ignore_at_inference = ["past_key_values"] |
|
|
attribute_map = { |
|
|
"num_attention_heads": "encoder_attention_heads", |
|
|
"hidden_size": "d_model", |
|
|
} |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
encoder_vocab_size=None, |
|
|
decoder_vocab_size=None, |
|
|
encoder_embed_dim=512, |
|
|
decoder_embed_dim=512, |
|
|
max_source_positions=210, |
|
|
max_target_positions=210, |
|
|
encoder_layers=6, |
|
|
encoder_ffn_dim=2048, |
|
|
encoder_attention_heads=8, |
|
|
decoder_layers=6, |
|
|
decoder_ffn_dim=2048, |
|
|
decoder_attention_heads=8, |
|
|
encoder_layerdrop=0.00, |
|
|
decoder_layerdrop=0.00, |
|
|
use_cache=True, |
|
|
is_encoder_decoder=True, |
|
|
activation_function="relu", |
|
|
encoder_normalize_before=False, |
|
|
decoder_normalize_before=False, |
|
|
layernorm_embedding=False, |
|
|
share_decoder_input_output_embed=False, |
|
|
dropout=0.1, |
|
|
attention_dropout=0.0, |
|
|
activation_dropout=0.0, |
|
|
init_std=0.02, |
|
|
scale_embedding=True, |
|
|
decoder_start_token_id=2, |
|
|
pad_token_id=1, |
|
|
bos_token_id=0, |
|
|
eos_token_id=2, |
|
|
attn_implementation="eager", |
|
|
**kwargs, |
|
|
): |
|
|
self.encoder_vocab_size = encoder_vocab_size |
|
|
self.decoder_vocab_size = decoder_vocab_size |
|
|
self.encoder_normalize_before = encoder_normalize_before |
|
|
self.decoder_normalize_before = decoder_normalize_before |
|
|
self.layernorm_embedding = layernorm_embedding |
|
|
self.max_source_positions = max_source_positions |
|
|
self.max_target_positions = max_target_positions |
|
|
self.encoder_embed_dim = encoder_embed_dim |
|
|
self.decoder_embed_dim = decoder_embed_dim |
|
|
self.encoder_ffn_dim = encoder_ffn_dim |
|
|
self.encoder_layers = encoder_layers |
|
|
self.encoder_attention_heads = encoder_attention_heads |
|
|
self.decoder_ffn_dim = decoder_ffn_dim |
|
|
self.decoder_layers = decoder_layers |
|
|
self.decoder_attention_heads = decoder_attention_heads |
|
|
self.dropout = dropout |
|
|
self.attention_dropout = attention_dropout |
|
|
self.activation_dropout = activation_dropout |
|
|
self.activation_function = activation_function |
|
|
self.init_std = init_std |
|
|
self.encoder_layerdrop = encoder_layerdrop |
|
|
self.decoder_layerdrop = decoder_layerdrop |
|
|
self.use_cache = use_cache |
|
|
self.num_hidden_layers = encoder_layers |
|
|
self.scale_embedding = scale_embedding |
|
|
self.share_decoder_input_output_embed = share_decoder_input_output_embed |
|
|
self.attn_implementation = attn_implementation |
|
|
|
|
|
super().__init__( |
|
|
pad_token_id=pad_token_id, |
|
|
bos_token_id=bos_token_id, |
|
|
eos_token_id=eos_token_id, |
|
|
is_encoder_decoder=is_encoder_decoder, |
|
|
decoder_start_token_id=decoder_start_token_id, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
|
|
|
class IndicTransOnnxConfig(OnnxSeq2SeqConfigWithPast): |
|
|
@property |
|
|
def inputs(self) -> Mapping[str, Mapping[int, str]]: |
|
|
common_inputs = OrderedDict( |
|
|
[ |
|
|
("input_ids", {0: "batch", 1: "encoder_sequence"}), |
|
|
("attention_mask", {0: "batch", 1: "encoder_sequence"}), |
|
|
] |
|
|
) |
|
|
|
|
|
if self.use_past: |
|
|
common_inputs["decoder_input_ids"] = {0: "batch"} |
|
|
common_inputs["decoder_attention_mask"] = { |
|
|
0: "batch", |
|
|
1: "past_decoder_sequence + sequence", |
|
|
} |
|
|
else: |
|
|
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} |
|
|
common_inputs["decoder_attention_mask"] = { |
|
|
0: "batch", |
|
|
1: "decoder_sequence", |
|
|
} |
|
|
|
|
|
if self.use_past: |
|
|
self.fill_with_past_key_values_(common_inputs, direction="inputs") |
|
|
return common_inputs |
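
        # For example, without past key values this property returns:
        #   {"input_ids":              {0: "batch", 1: "encoder_sequence"},
        #    "attention_mask":         {0: "batch", 1: "encoder_sequence"},
        #    "decoder_input_ids":      {0: "batch", 1: "decoder_sequence"},
        #    "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"}}
        # With use_past=True, the decoder axes shrink to a single step and the
        # past_key_values entries are appended by fill_with_past_key_values_.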
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _generate_dummy_inputs_for_sequence_classification_and_question_answering( |
|
|
self, |
|
|
tokenizer: PreTrainedTokenizer, |
|
|
batch_size: int = -1, |
|
|
seq_length: int = -1, |
|
|
is_pair: bool = False, |
|
|
framework: Optional[TensorType] = None, |
|
|
) -> Mapping[str, Any]: |
|
|
|
|
|
|
|
|
|
|
|
batch_size = compute_effective_axis_dimension( |
|
|
batch_size, |
|
|
fixed_dimension=OnnxConfig.default_fixed_batch, |
|
|
num_token_to_add=0, |
|
|
) |
|
|
|
|
|
|
|
|
token_to_add = tokenizer.num_special_tokens_to_add(is_pair) |
|
|
seq_length = compute_effective_axis_dimension( |
|
|
seq_length, |
|
|
fixed_dimension=OnnxConfig.default_fixed_sequence, |
|
|
num_token_to_add=token_to_add, |
|
|
) |
|
|
|
|
|
|
|
|
        # Repeat the unk token seq_length times, space-separated, per batch row.
        dummy_input = [" ".join([tokenizer.unk_token] * seq_length)] * batch_size
|
|
common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) |
|
|
return common_inputs |
|
|
|
|
|
|
|
|
def _generate_dummy_inputs_for_default_and_seq2seq_lm( |
|
|
self, |
|
|
tokenizer: PreTrainedTokenizer, |
|
|
batch_size: int = -1, |
|
|
seq_length: int = -1, |
|
|
is_pair: bool = False, |
|
|
framework: Optional[TensorType] = None, |
|
|
) -> Mapping[str, Any]: |
|
|
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( |
|
|
tokenizer, batch_size, seq_length, is_pair, framework |
|
|
) |
|
|
|
|
|
|
|
|
decoder_seq_length = seq_length if not self.use_past else 1 |
|
|
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( |
|
|
tokenizer, batch_size, decoder_seq_length, is_pair, framework |
|
|
) |
|
|
decoder_inputs = { |
|
|
f"decoder_{name}": tensor for name, tensor in decoder_inputs.items() |
|
|
} |
|
|
common_inputs = dict(**encoder_inputs, **decoder_inputs) |
|
|
|
|
|
if self.use_past: |
|
|
if not is_torch_available(): |
|
|
raise ValueError( |
|
|
"Cannot generate dummy past_keys inputs without PyTorch installed." |
|
|
) |
|
|
else: |
|
|
import torch |
|
|
batch, encoder_seq_length = common_inputs["input_ids"].shape |
|
|
decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] |
|
|
( |
|
|
num_encoder_attention_heads, |
|
|
num_decoder_attention_heads, |
|
|
) = self.num_attention_heads |
|
|
encoder_shape = ( |
|
|
batch, |
|
|
num_encoder_attention_heads, |
|
|
encoder_seq_length, |
|
|
self._config.hidden_size // num_encoder_attention_heads, |
|
|
) |
|
|
decoder_past_length = decoder_seq_length + 3 |
|
|
decoder_shape = ( |
|
|
batch, |
|
|
num_decoder_attention_heads, |
|
|
decoder_past_length, |
|
|
self._config.hidden_size // num_decoder_attention_heads, |
|
|
) |
|
|
|
|
|
common_inputs["decoder_attention_mask"] = torch.cat( |
|
|
[ |
|
|
common_inputs["decoder_attention_mask"], |
|
|
torch.ones(batch, decoder_past_length), |
|
|
], |
|
|
dim=1, |
|
|
) |
|
|
|
|
|
common_inputs["past_key_values"] = [] |
|
|
|
|
|
num_encoder_layers, num_decoder_layers = self.num_layers |
|
|
min_num_layers = min(num_encoder_layers, num_decoder_layers) |
|
|
max_num_layers = ( |
|
|
max(num_encoder_layers, num_decoder_layers) - min_num_layers |
|
|
) |
|
|
remaining_side_name = ( |
|
|
"encoder" if num_encoder_layers > num_decoder_layers else "decoder" |
|
|
) |
|
|
|
|
|
for _ in range(min_num_layers): |
|
|
common_inputs["past_key_values"].append( |
|
|
( |
|
|
torch.zeros(decoder_shape), |
|
|
torch.zeros(decoder_shape), |
|
|
torch.zeros(encoder_shape), |
|
|
torch.zeros(encoder_shape), |
|
|
) |
|
|
) |
|
|
|
|
|
shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape |
|
|
for _ in range(min_num_layers, max_num_layers): |
|
|
common_inputs["past_key_values"].append( |
|
|
(torch.zeros(shape), torch.zeros(shape)) |
|
|
) |
|
|
return common_inputs |
|
|
|
|
|
generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm |
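

# ---------------------------------------------------------------------------
# Modeling code (modeling_indictrans.py, judging by the relative import of
# .configuration_indictrans below).
# ---------------------------------------------------------------------------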
|
|
|
|
|
import math |
|
|
from typing import List, Optional, Tuple, Union |
|
|
|
|
|
import torch |
|
|
import torch.nn as nn |
|
|
from torch.nn import functional as F |
|
|
|
|
|
from transformers.activations import ACT2FN |
|
|
|
|
|
from transformers.modeling_attn_mask_utils import ( |
|
|
_prepare_4d_attention_mask, |
|
|
_prepare_4d_attention_mask_for_sdpa, |
|
|
_prepare_4d_causal_attention_mask, |
|
|
_prepare_4d_causal_attention_mask_for_sdpa, |
|
|
) |
|
|
|
|
|
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled |
|
|
from transformers.modeling_outputs import ( |
|
|
BaseModelOutput, |
|
|
BaseModelOutputWithPastAndCrossAttentions, |
|
|
Seq2SeqLMOutput, |
|
|
Seq2SeqModelOutput |
|
|
) |
|
|
|
|
|
from transformers.utils import ( |
|
|
logging, |
|
|
is_flash_attn_2_available, |
|
|
is_flash_attn_greater_or_equal_2_10, |
|
|
) |
|
|
|
|
|
from transformers.modeling_utils import PreTrainedModel |
|
|
from transformers.generation.utils import GenerationMixin |
|
|
|
|
|
from .configuration_indictrans import IndicTransConfig |
|
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
INDICTRANS_PRETRAINED_MODEL_ARCHIVE_LIST = [""] |
|
|
|
|
|
try: |
|
|
if is_flash_attn_2_available(): |
|
|
from flash_attn import flash_attn_func, flash_attn_varlen_func |
|
|
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input |
|
|
except ImportError:
|
|
pass |
|
|
|
|
|
|
|
|
|
|
|
def _get_unpad_data(attention_mask): |
|
|
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) |
|
|
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() |
|
|
max_seqlen_in_batch = seqlens_in_batch.max().item() |
|
|
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) |
|
|
return ( |
|
|
indices, |
|
|
cu_seqlens, |
|
|
max_seqlen_in_batch, |
|
|
) |
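
# Worked example: attention_mask = [[1, 1, 0], [1, 1, 1]] yields
#   seqlens_in_batch = [2, 3], indices = [0, 1, 3, 4, 5],
#   cu_seqlens = [0, 2, 5], max_seqlen_in_batch = 3,
# i.e. the flattened-token bookkeeping that flash_attn_varlen_func expects.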
|
|
|
|
|
|
|
|
|
|
|
def shift_tokens_right( |
|
|
input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int |
|
|
): |
|
|
""" |
|
|
Shift input ids one token to the right. |
|
|
""" |
|
|
shifted_input_ids = input_ids.new_zeros(input_ids.shape) |
|
|
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() |
|
|
shifted_input_ids[:, 0] = decoder_start_token_id |
|
|
|
|
|
if pad_token_id is None: |
|
|
raise ValueError("self.model.config.pad_token_id has to be defined.") |
|
|
|
|
|
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) |
|
|
|
|
|
return shifted_input_ids |
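
# Worked example (pad_token_id=1, decoder_start_token_id=2):
#   input_ids         = [[5, 6, 7, 2]]
#   shifted_input_ids = [[2, 5, 6, 7]]
# The decoder is fed the target shifted one step to the right, starting from
# decoder_start_token_id.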
|
|
|
|
|
|
|
|
def create_position_ids_from_input_ids( |
|
|
input_ids, padding_idx, past_key_values_length=0 |
|
|
): |
|
|
""" |
|
|
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols |
|
|
are ignored. This is modified from fairseq's `utils.make_positions`. |
|
|
""" |
|
|
|
|
|
mask = input_ids.ne(padding_idx).int() |
|
|
incremental_indices = ( |
|
|
torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length |
|
|
) * mask |
|
|
return incremental_indices.long() + padding_idx |
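
# Worked example (padding_idx=1): input_ids = [[1, 1, 8, 9]] gives
# mask = [0, 0, 1, 1] and cumulative counts [0, 0, 1, 2], so the returned
# position ids are [1, 1, 2, 3]: padding keeps padding_idx, real tokens
# count up from padding_idx + 1.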
|
|
|
|
|
|
|
|
|
|
|
class IndicTransSinusoidalPositionalEmbedding(nn.Module): |
|
|
"""This module produces sinusoidal positional embeddings of any length.""" |
|
|
|
|
|
def __init__( |
|
|
self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None |
|
|
): |
|
|
super().__init__() |
|
|
self.offset = 2 |
|
|
self.embedding_dim = embedding_dim |
|
|
self.padding_idx = padding_idx |
|
|
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) |
|
|
|
|
|
def make_weights( |
|
|
self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None |
|
|
): |
|
|
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) |
|
|
if hasattr(self, "weights"): |
|
|
|
|
|
emb_weights = emb_weights.to( |
|
|
dtype=self.weights.dtype, device=self.weights.device |
|
|
) |
|
|
|
|
|
self.register_buffer("weights", emb_weights, persistent=False) |
|
|
|
|
|
@staticmethod |
|
|
def get_embedding( |
|
|
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None |
|
|
): |
|
|
""" |
|
|
Build sinusoidal embeddings. |
|
|
|
|
|
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of |
|
|
"Attention Is All You Need". |
|
|
""" |
|
|
half_dim = embedding_dim // 2 |
|
|
emb = math.log(10000) / (half_dim - 1) |
|
|
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) |
|
|
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( |
|
|
1 |
|
|
) * emb.unsqueeze(0) |
|
|
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( |
|
|
num_embeddings, -1 |
|
|
) |
|
|
if embedding_dim % 2 == 1: |
|
|
|
|
|
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) |
|
|
if padding_idx is not None: |
|
|
emb[padding_idx, :] = 0 |
|
|
|
|
|
return emb.to(torch.get_default_dtype()) |
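
        # Concretely, for position p and channel i < half_dim:
        #   emb[p, i]            = sin(p * 10000^(-i / (half_dim - 1)))
        #   emb[p, half_dim + i] = cos(p * 10000^(-i / (half_dim - 1)))
        # with a zero column appended when embedding_dim is odd, and row
        # padding_idx zeroed out so padding positions carry no signal.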
|
|
|
|
|
@torch.no_grad() |
|
|
def forward( |
|
|
self, |
|
|
        input_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values_length: int = 0,
|
|
): |
|
|
if input_ids is not None: |
|
|
bsz, seq_len = input_ids.size() |
|
|
|
|
|
position_ids = create_position_ids_from_input_ids( |
|
|
input_ids, self.padding_idx, past_key_values_length |
|
|
).to(input_ids.device) |
|
|
else: |
|
|
bsz, seq_len = inputs_embeds.size()[:-1] |
|
|
position_ids = self.create_position_ids_from_inputs_embeds( |
|
|
inputs_embeds, past_key_values_length |
|
|
) |
|
|
|
|
|
|
|
|
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length |
|
|
if max_pos > self.weights.size(0): |
|
|
self.make_weights( |
|
|
max_pos + self.offset, self.embedding_dim, self.padding_idx |
|
|
) |
|
|
|
|
|
return ( |
|
|
self.weights.index_select(0, position_ids.view(-1)) |
|
|
.view(bsz, seq_len, self.weights.shape[-1]) |
|
|
.detach() |
|
|
) |
|
|
|
|
|
def create_position_ids_from_inputs_embeds( |
|
|
self, inputs_embeds, past_key_values_length |
|
|
): |
|
|
""" |
|
|
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. |
|
|
|
|
|
Args: |
|
|
inputs_embeds: torch.Tensor |
|
|
|
|
|
Returns: torch.Tensor |
|
|
""" |
|
|
input_shape = inputs_embeds.size()[:-1] |
|
|
sequence_length = input_shape[1] |
|
|
|
|
|
position_ids = torch.arange( |
|
|
self.padding_idx + 1, |
|
|
sequence_length + self.padding_idx + 1, |
|
|
dtype=torch.long, |
|
|
device=inputs_embeds.device, |
|
|
) |
|
|
return ( |
|
|
position_ids.unsqueeze(0).expand(input_shape).contiguous() |
|
|
+ past_key_values_length |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
class IndicTransAttention(nn.Module): |
|
|
"""Multi-headed attention from 'Attention Is All You Need' paper""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
embed_dim: int, |
|
|
num_heads: int, |
|
|
dropout: float = 0.0, |
|
|
is_decoder: bool = False, |
|
|
bias: bool = True, |
|
|
is_causal: bool = False, |
|
|
config: Optional[IndicTransConfig] = None, |
|
|
): |
|
|
super().__init__() |
|
|
self.embed_dim = embed_dim |
|
|
self.num_heads = num_heads |
|
|
self.dropout = dropout |
|
|
self.head_dim = embed_dim // num_heads |
|
|
self.config = config |
|
|
|
|
|
if (self.head_dim * num_heads) != self.embed_dim: |
|
|
raise ValueError( |
|
|
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" |
|
|
f" and `num_heads`: {num_heads})." |
|
|
) |
|
|
self.scaling = self.head_dim**-0.5 |
|
|
self.is_decoder = is_decoder |
|
|
self.is_causal = is_causal |
|
|
|
|
|
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) |
|
|
|
|
|
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
|
|
return ( |
|
|
tensor.view(bsz, seq_len, self.num_heads, self.head_dim) |
|
|
.transpose(1, 2) |
|
|
.contiguous() |
|
|
) |
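
    # _shape: (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim),
    # the per-head layout used throughout the attention computation.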
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
key_value_states: Optional[torch.Tensor] = None, |
|
|
past_key_value: Optional[Tuple[torch.Tensor]] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
layer_head_mask: Optional[torch.Tensor] = None, |
|
|
output_attentions: bool = False, |
|
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
|
|
"""Input shape: Batch x Time x Channel""" |
|
|
|
|
|
|
|
|
|
|
|
is_cross_attention = key_value_states is not None |
|
|
|
|
|
bsz, tgt_len, _ = hidden_states.size() |
|
|
|
|
|
|
|
|
query_states = self.q_proj(hidden_states) * self.scaling |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
|
is_cross_attention |
|
|
and past_key_value is not None |
|
|
and past_key_value[0].shape[2] == key_value_states.shape[1] |
|
|
): |
|
|
|
|
|
key_states = past_key_value[0] |
|
|
value_states = past_key_value[1] |
|
|
elif is_cross_attention: |
|
|
|
|
|
key_states = self._shape(self.k_proj(key_value_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(key_value_states), -1, bsz) |
|
|
elif past_key_value is not None: |
|
|
|
|
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
key_states = torch.cat([past_key_value[0], key_states], dim=2) |
|
|
value_states = torch.cat([past_key_value[1], value_states], dim=2) |
|
|
else: |
|
|
|
|
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
|
|
|
if self.is_decoder: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
past_key_value = (key_states, value_states) |
|
|
|
|
|
proj_shape = (bsz * self.num_heads, -1, self.head_dim) |
|
|
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) |
|
|
key_states = key_states.reshape(*proj_shape) |
|
|
value_states = value_states.reshape(*proj_shape) |
|
|
|
|
|
src_len = key_states.size(1) |
|
|
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) |
|
|
|
|
|
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): |
|
|
raise ValueError( |
|
|
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" |
|
|
f" {attn_weights.size()}" |
|
|
) |
|
|
|
|
|
if attention_mask is not None: |
|
|
if attention_mask.size() != (bsz, 1, tgt_len, src_len): |
|
|
raise ValueError( |
|
|
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" |
|
|
) |
|
|
attn_weights = ( |
|
|
attn_weights.view(bsz, self.num_heads, tgt_len, src_len) |
|
|
+ attention_mask |
|
|
) |
|
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
|
|
|
attn_weights = F.softmax(attn_weights, dim=-1) |
|
|
|
|
|
if layer_head_mask is not None: |
|
|
if layer_head_mask.size() != (self.num_heads,): |
|
|
raise ValueError( |
|
|
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" |
|
|
f" {layer_head_mask.size()}" |
|
|
) |
|
|
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( |
|
|
bsz, self.num_heads, tgt_len, src_len |
|
|
) |
|
|
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) |
|
|
|
|
|
if output_attentions: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
attn_weights_reshaped = attn_weights.view( |
|
|
bsz, self.num_heads, tgt_len, src_len |
|
|
) |
|
|
attn_weights = attn_weights_reshaped.view( |
|
|
bsz * self.num_heads, tgt_len, src_len |
|
|
) |
|
|
else: |
|
|
attn_weights_reshaped = None |
|
|
|
|
|
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training) |
|
|
|
|
|
attn_output = torch.bmm(attn_probs, value_states) |
|
|
|
|
|
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): |
|
|
raise ValueError( |
|
|
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" |
|
|
f" {attn_output.size()}" |
|
|
) |
|
|
|
|
|
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) |
|
|
attn_output = attn_output.transpose(1, 2) |
|
|
|
|
|
|
|
|
|
|
|
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) |
|
|
|
|
|
attn_output = self.out_proj(attn_output) |
|
|
|
|
|
return attn_output, attn_weights_reshaped, past_key_value |
|
|
|
|
|
|
|
|
class IndicTransFlashAttention2(IndicTransAttention): |
|
|
""" |
|
|
    IndicTrans flash attention module. This module inherits from `IndicTransAttention`, as the weights of the module stay
|
|
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of |
|
|
flash attention and deal with padding tokens in case the input contains any of them. |
|
|
""" |
|
|
|
|
|
|
|
|
def __init__(self, *args, **kwargs): |
|
|
super().__init__(*args, **kwargs) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() |
|
|
|
|
|
def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
|
|
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
key_value_states: Optional[torch.Tensor] = None, |
|
|
past_key_value: Optional[Tuple[torch.Tensor]] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
layer_head_mask: Optional[torch.Tensor] = None, |
|
|
output_attentions: bool = False, |
|
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
|
|
|
|
|
if output_attentions: |
|
|
raise ValueError("IndicTransFlashAttention2 attention does not support output_attentions") |
|
|
|
|
|
|
|
|
|
|
|
is_cross_attention = key_value_states is not None |
|
|
|
|
|
bsz, q_len, _ = hidden_states.size() |
|
|
|
|
|
|
|
|
query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
|
is_cross_attention |
|
|
and past_key_value is not None |
|
|
and past_key_value[0].shape[2] == key_value_states.shape[1] |
|
|
): |
|
|
|
|
|
key_states = past_key_value[0].transpose(1, 2) |
|
|
value_states = past_key_value[1].transpose(1, 2) |
|
|
elif is_cross_attention: |
|
|
|
|
|
key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) |
|
|
value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) |
|
|
elif past_key_value is not None: |
|
|
|
|
|
key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) |
|
|
key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) |
|
|
value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) |
|
|
else: |
|
|
|
|
|
key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) |
|
|
|
|
|
if self.is_decoder: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) |
|
|
|
|
|
kv_seq_len = key_states.shape[-2] |
|
|
if past_key_value is not None: |
|
|
kv_seq_len += past_key_value[0].shape[-2] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
input_dtype = query_states.dtype |
|
|
if input_dtype == torch.float32: |
|
|
if torch.is_autocast_enabled(): |
|
|
target_dtype = torch.get_autocast_gpu_dtype() |
|
|
|
|
|
elif hasattr(self.config, "_pre_quantization_dtype"): |
|
|
target_dtype = self.config._pre_quantization_dtype |
|
|
else: |
|
|
target_dtype = self.q_proj.weight.dtype |
|
|
|
|
|
logger.warning_once( |
|
|
f"The input hidden states seems to be silently casted in float32, this might be related to" |
|
|
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" |
|
|
f" {target_dtype}." |
|
|
) |
|
|
|
|
|
query_states = query_states.to(target_dtype) |
|
|
key_states = key_states.to(target_dtype) |
|
|
value_states = value_states.to(target_dtype) |
|
|
|
|
|
attn_output = self._flash_attention_forward( |
|
|
query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout |
|
|
) |
|
|
|
|
|
attn_output = attn_output.reshape(bsz, q_len, -1) |
|
|
attn_output = self.out_proj(attn_output) |
|
|
|
|
|
if not output_attentions: |
|
|
attn_weights = None |
|
|
|
|
|
return attn_output, attn_weights, past_key_value |
|
|
|
|
|
|
|
|
def _flash_attention_forward( |
|
|
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None |
|
|
): |
|
|
""" |
|
|
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token |
|
|
first unpad the input, then computes the attention scores and pad the final attention scores. |
|
|
|
|
|
Args: |
|
|
query_states (`torch.Tensor`): |
|
|
Input query states to be passed to Flash Attention API |
|
|
key_states (`torch.Tensor`): |
|
|
Input key states to be passed to Flash Attention API |
|
|
value_states (`torch.Tensor`): |
|
|
Input value states to be passed to Flash Attention API |
|
|
attention_mask (`torch.Tensor`): |
|
|
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the |
|
|
position of padding tokens and 1 for the position of non-padding tokens. |
|
|
dropout (`float`): |
|
|
Attention dropout |
|
|
softmax_scale (`float`, *optional*): |
|
|
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) |
|
|
""" |
|
|
if not self._flash_attn_uses_top_left_mask: |
|
|
causal = self.is_causal |
|
|
else: |
|
|
|
|
|
causal = self.is_causal and query_length != 1 |
|
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
batch_size = query_states.shape[0] |
|
|
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( |
|
|
query_states, key_states, value_states, attention_mask, query_length |
|
|
) |
|
|
|
|
|
cu_seqlens_q, cu_seqlens_k = cu_seq_lens |
|
|
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens |
|
|
|
|
|
attn_output_unpad = flash_attn_varlen_func( |
|
|
query_states, |
|
|
key_states, |
|
|
value_states, |
|
|
cu_seqlens_q=cu_seqlens_q, |
|
|
cu_seqlens_k=cu_seqlens_k, |
|
|
max_seqlen_q=max_seqlen_in_batch_q, |
|
|
max_seqlen_k=max_seqlen_in_batch_k, |
|
|
dropout_p=dropout, |
|
|
softmax_scale=softmax_scale, |
|
|
causal=causal, |
|
|
) |
|
|
|
|
|
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) |
|
|
else: |
|
|
attn_output = flash_attn_func( |
|
|
query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal |
|
|
) |
|
|
|
|
|
return attn_output |
|
|
|
|
|
|
|
|
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): |
|
|
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) |
|
|
batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape |
|
|
|
|
|
key_layer = index_first_axis( |
|
|
key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
|
|
) |
|
|
value_layer = index_first_axis( |
|
|
value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k |
|
|
) |
|
|
if query_length == kv_seq_len: |
|
|
query_layer = index_first_axis( |
|
|
query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k |
|
|
) |
|
|
cu_seqlens_q = cu_seqlens_k |
|
|
max_seqlen_in_batch_q = max_seqlen_in_batch_k |
|
|
indices_q = indices_k |
|
|
elif query_length == 1: |
|
|
max_seqlen_in_batch_q = 1 |
|
|
cu_seqlens_q = torch.arange( |
|
|
batch_size + 1, dtype=torch.int32, device=query_layer.device |
|
|
) |
|
|
indices_q = cu_seqlens_q[:-1] |
|
|
query_layer = query_layer.squeeze(1) |
|
|
else: |
|
|
|
|
|
attention_mask = attention_mask[:, -query_length:] |
|
|
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) |
|
|
|
|
|
return ( |
|
|
query_layer, |
|
|
key_layer, |
|
|
value_layer, |
|
|
indices_q, |
|
|
(cu_seqlens_q, cu_seqlens_k), |
|
|
(max_seqlen_in_batch_q, max_seqlen_in_batch_k), |
|
|
) |
|
|
|
|
|
|
|
|
class IndicTransSdpaAttention(IndicTransAttention): |
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
key_value_states: Optional[torch.Tensor] = None, |
|
|
past_key_value: Optional[Tuple[torch.Tensor]] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
layer_head_mask: Optional[torch.Tensor] = None, |
|
|
output_attentions: bool = False, |
|
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: |
|
|
"""Input shape: Batch x Time x Channel""" |
|
|
if output_attentions or layer_head_mask is not None: |
|
|
|
|
|
logger.warning_once( |
|
|
"IndicTransModel is using IndicTransSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention" |
|
|
' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
|
|
) |
|
|
return super().forward( |
|
|
hidden_states, |
|
|
key_value_states=key_value_states, |
|
|
past_key_value=past_key_value, |
|
|
attention_mask=attention_mask, |
|
|
layer_head_mask=layer_head_mask, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
is_cross_attention = key_value_states is not None |
|
|
|
|
|
bsz, tgt_len, _ = hidden_states.size() |
|
|
|
|
|
|
|
|
query_states = self.q_proj(hidden_states) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
|
is_cross_attention |
|
|
and past_key_value is not None |
|
|
and past_key_value[0].shape[2] == key_value_states.shape[1] |
|
|
): |
|
|
|
|
|
key_states = past_key_value[0] |
|
|
value_states = past_key_value[1] |
|
|
elif is_cross_attention: |
|
|
|
|
|
key_states = self._shape(self.k_proj(key_value_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(key_value_states), -1, bsz) |
|
|
elif past_key_value is not None: |
|
|
|
|
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
key_states = torch.cat([past_key_value[0], key_states], dim=2) |
|
|
value_states = torch.cat([past_key_value[1], value_states], dim=2) |
|
|
else: |
|
|
|
|
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
|
|
|
if self.is_decoder: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
past_key_value = (key_states, value_states) |
|
|
|
|
|
query_states = self._shape(query_states, tgt_len, bsz) |
|
|
|
|
|
|
|
|
|
|
|
attn_output = F.scaled_dot_product_attention( |
|
|
query_states, |
|
|
key_states, |
|
|
value_states, |
|
|
attn_mask=attention_mask, |
|
|
dropout_p=self.dropout if self.training else 0.0, |
|
|
|
|
|
is_causal=self.is_causal and attention_mask is None and tgt_len > 1, |
|
|
) |
|
|
|
|
|
if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): |
|
|
raise ValueError( |
|
|
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" |
|
|
f" {attn_output.size()}" |
|
|
) |
|
|
|
|
|
attn_output = attn_output.transpose(1, 2) |
|
|
|
|
|
|
|
|
|
|
|
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) |
|
|
|
|
|
attn_output = self.out_proj(attn_output) |
|
|
|
|
|
return attn_output, None, past_key_value |
|
|
|
|
|
|
|
|
INDICTRANS_ATTENTION_CLASSES = { |
|
|
"eager": IndicTransAttention, |
|
|
"sdpa": IndicTransSdpaAttention, |
|
|
"flash_attention_2": IndicTransFlashAttention2, |
|
|
} |
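
# config.attn_implementation ("eager", "sdpa", or "flash_attention_2") selects
# which of these classes the encoder/decoder layers instantiate below.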
|
|
|
|
|
|
|
|
class IndicTransEncoderLayer(nn.Module): |
|
|
def __init__(self, config: IndicTransConfig): |
|
|
super().__init__() |
|
|
self.embed_dim = config.encoder_embed_dim |
|
|
self.self_attn = INDICTRANS_ATTENTION_CLASSES[config._attn_implementation]( |
|
|
embed_dim=self.embed_dim, |
|
|
num_heads=config.encoder_attention_heads, |
|
|
dropout=config.attention_dropout, |
|
|
config=config, |
|
|
) |
|
|
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
self.dropout = config.dropout |
|
|
self.activation_fn = ACT2FN[config.activation_function] |
|
|
self.activation_dropout = config.activation_dropout |
|
|
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) |
|
|
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) |
|
|
self.final_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
self.normalize_before = config.encoder_normalize_before |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: torch.Tensor, |
|
|
layer_head_mask: torch.Tensor, |
|
|
output_attentions: bool = False, |
|
|
) -> torch.Tensor: |
|
|
""" |
|
|
Args: |
|
|
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
|
|
attention_mask (`torch.FloatTensor`): attention mask of size |
|
|
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
|
|
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size |
|
|
`(encoder_attention_heads,)`. |
|
|
output_attentions (`bool`, *optional*): |
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
|
|
returned tensors for more detail. |
|
|
""" |
|
|
residual = hidden_states |
|
|
if self.normalize_before: |
|
|
hidden_states = self.self_attn_layer_norm(hidden_states) |
|
|
hidden_states, attn_weights, _ = self.self_attn( |
|
|
hidden_states=hidden_states, |
|
|
attention_mask=attention_mask, |
|
|
layer_head_mask=layer_head_mask, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
hidden_states = residual + hidden_states |
|
|
if not self.normalize_before: |
|
|
hidden_states = self.self_attn_layer_norm(hidden_states) |
|
|
|
|
|
residual = hidden_states |
|
|
if self.normalize_before: |
|
|
hidden_states = self.final_layer_norm(hidden_states) |
|
|
hidden_states = self.activation_fn(self.fc1(hidden_states)) |
|
|
hidden_states = F.dropout( |
|
|
hidden_states, p=self.activation_dropout, training=self.training |
|
|
) |
|
|
hidden_states = self.fc2(hidden_states) |
|
|
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
hidden_states = residual + hidden_states |
|
|
if not self.normalize_before: |
|
|
hidden_states = self.final_layer_norm(hidden_states) |
|
|
|
|
|
if hidden_states.dtype == torch.float16 and ( |
|
|
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() |
|
|
): |
|
|
clamp_value = torch.finfo(hidden_states.dtype).max - 1000 |
|
|
hidden_states = torch.clamp( |
|
|
hidden_states, min=-clamp_value, max=clamp_value |
|
|
) |
|
|
|
|
|
outputs = (hidden_states,) |
|
|
|
|
|
if output_attentions: |
|
|
outputs += (attn_weights,) |
|
|
|
|
|
return outputs |
|
|
|
|
|
|
|
|
|
|
|
class IndicTransDecoderLayer(nn.Module): |
|
|
def __init__(self, config: IndicTransConfig): |
|
|
super().__init__() |
|
|
self.embed_dim = config.decoder_embed_dim |
|
|
|
|
|
self.self_attn = INDICTRANS_ATTENTION_CLASSES[config._attn_implementation]( |
|
|
embed_dim=self.embed_dim, |
|
|
num_heads=config.decoder_attention_heads, |
|
|
dropout=config.attention_dropout, |
|
|
is_decoder=True, |
|
|
is_causal=True, |
|
|
config=config, |
|
|
) |
|
|
self.dropout = config.dropout |
|
|
self.activation_fn = ACT2FN[config.activation_function] |
|
|
self.activation_dropout = config.activation_dropout |
|
|
|
|
|
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
self.encoder_attn = INDICTRANS_ATTENTION_CLASSES[config._attn_implementation]( |
|
|
self.embed_dim, |
|
|
config.decoder_attention_heads, |
|
|
dropout=config.attention_dropout, |
|
|
is_decoder=True, |
|
|
config=config, |
|
|
) |
|
|
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) |
|
|
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) |
|
|
self.final_layer_norm = nn.LayerNorm(self.embed_dim) |
|
|
self.normalize_before = config.decoder_normalize_before |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
encoder_hidden_states: Optional[torch.Tensor] = None, |
|
|
encoder_attention_mask: Optional[torch.Tensor] = None, |
|
|
layer_head_mask: Optional[torch.Tensor] = None, |
|
|
cross_attn_layer_head_mask: Optional[torch.Tensor] = None, |
|
|
past_key_value: Optional[Tuple[torch.Tensor]] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
use_cache: Optional[bool] = True, |
|
|
) -> torch.Tensor: |
|
|
""" |
|
|
Args: |
|
|
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` |
|
|
attention_mask (`torch.FloatTensor`): attention mask of size |
|
|
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
|
|
encoder_hidden_states (`torch.FloatTensor`): |
|
|
cross attention input to the layer of shape `(batch, seq_len, embed_dim)` |
|
|
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size |
|
|
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. |
|
|
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size |
|
|
`(encoder_attention_heads,)`. |
|
|
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of |
|
|
size `(decoder_attention_heads,)`. |
|
|
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states |
|
|
output_attentions (`bool`, *optional*): |
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
|
|
returned tensors for more detail. |
|
|
""" |
|
|
residual = hidden_states |
|
|
if self.normalize_before: |
|
|
hidden_states = self.self_attn_layer_norm(hidden_states) |
|
|
|
|
|
|
|
|
|
|
|
self_attn_past_key_value = ( |
|
|
past_key_value[:2] if past_key_value is not None else None |
|
|
) |
|
|
|
|
|
hidden_states, self_attn_weights, present_key_value = self.self_attn( |
|
|
hidden_states=hidden_states, |
|
|
past_key_value=self_attn_past_key_value, |
|
|
attention_mask=attention_mask, |
|
|
layer_head_mask=layer_head_mask, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
hidden_states = residual + hidden_states |
|
|
if not self.normalize_before: |
|
|
hidden_states = self.self_attn_layer_norm(hidden_states) |
|
|
|
|
|
|
|
|
cross_attn_present_key_value = None |
|
|
cross_attn_weights = None |
|
|
if encoder_hidden_states is not None: |
|
|
residual = hidden_states |
|
|
if self.normalize_before: |
|
|
hidden_states = self.encoder_attn_layer_norm(hidden_states) |
|
|
|
|
|
|
|
|
cross_attn_past_key_value = ( |
|
|
past_key_value[-2:] if past_key_value is not None else None |
|
|
) |
|
|
( |
|
|
hidden_states, |
|
|
cross_attn_weights, |
|
|
cross_attn_present_key_value, |
|
|
) = self.encoder_attn( |
|
|
hidden_states=hidden_states, |
|
|
key_value_states=encoder_hidden_states, |
|
|
attention_mask=encoder_attention_mask, |
|
|
layer_head_mask=cross_attn_layer_head_mask, |
|
|
past_key_value=cross_attn_past_key_value, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
hidden_states = F.dropout( |
|
|
hidden_states, p=self.dropout, training=self.training |
|
|
) |
|
|
hidden_states = residual + hidden_states |
|
|
if not self.normalize_before: |
|
|
hidden_states = self.encoder_attn_layer_norm(hidden_states) |
|
|
|
|
|
|
|
|
present_key_value = present_key_value + cross_attn_present_key_value |
|
|
|
|
|
|
|
|
residual = hidden_states |
|
|
if self.normalize_before: |
|
|
hidden_states = self.final_layer_norm(hidden_states) |
|
|
hidden_states = self.activation_fn(self.fc1(hidden_states)) |
|
|
hidden_states = F.dropout( |
|
|
hidden_states, p=self.activation_dropout, training=self.training |
|
|
) |
|
|
hidden_states = self.fc2(hidden_states) |
|
|
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) |
|
|
hidden_states = residual + hidden_states |
|
|
if not self.normalize_before: |
|
|
hidden_states = self.final_layer_norm(hidden_states) |
|
|
|
|
|
outputs = (hidden_states,) |
|
|
|
|
|
if output_attentions: |
|
|
outputs += (self_attn_weights, cross_attn_weights) |
|
|
|
|
|
if use_cache: |
|
|
outputs += (present_key_value,) |
|
|
|
|
|
return outputs |
|
|
|
|
|
|
|
|
|
|
|
class IndicTransPreTrainedModel(PreTrainedModel): |
|
|
config_class = IndicTransConfig |
|
|
base_model_prefix = "model" |
|
|
supports_gradient_checkpointing = True |
|
|
_no_split_modules = ["IndicTransAttention"] |
|
|
|
|
|
def _init_weights(self, module): |
|
|
std = self.config.init_std |
|
|
if isinstance(module, nn.Linear): |
|
|
module.weight.data.normal_(mean=0.0, std=std) |
|
|
if module.bias is not None: |
|
|
module.bias.data.zero_() |
|
|
elif isinstance(module, nn.Embedding): |
|
|
module.weight.data.normal_(mean=0.0, std=std) |
|
|
if module.padding_idx is not None: |
|
|
module.weight.data[module.padding_idx].zero_() |
|
|
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
|
if isinstance(module, (IndicTransDecoder, IndicTransEncoder)): |
|
|
module.gradient_checkpointing = value |
|
|
|
|
|
|
|
|
|
|
|
class IndicTransEncoder(IndicTransPreTrainedModel): |
|
|
""" |
|
|
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a |
|
|
[`IndicTransEncoderLayer`]. |
|
|
|
|
|
Args: |
|
|
config: IndicTransConfig |
|
|
embed_tokens (nn.Embedding): output embedding |
|
|
""" |
|
|
|
|
|
def __init__( |
|
|
self, config: IndicTransConfig, embed_tokens: Optional[nn.Embedding] = None |
|
|
): |
|
|
super().__init__(config) |
|
|
|
|
|
self.dropout = config.dropout |
|
|
self.layerdrop = config.encoder_layerdrop |
|
|
|
|
|
embed_dim = config.encoder_embed_dim |
|
|
self.padding_idx = config.pad_token_id |
|
|
self.max_source_positions = config.max_source_positions |
|
|
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 |
|
|
|
|
|
self.embed_tokens = nn.Embedding( |
|
|
config.encoder_vocab_size, embed_dim, self.padding_idx |
|
|
) |
|
|
|
|
|
if embed_tokens is not None: |
|
|
self.embed_tokens.weight = embed_tokens.weight |
|
|
|
|
|
self.embed_positions = IndicTransSinusoidalPositionalEmbedding( |
|
|
config.max_source_positions, |
|
|
embed_dim, |
|
|
self.padding_idx, |
|
|
) |
|
|
self.layers = nn.ModuleList( |
|
|
[IndicTransEncoderLayer(config) for _ in range(config.encoder_layers)] |
|
|
) |
|
|
self.layer_norm = ( |
|
|
nn.LayerNorm(embed_dim) if config.encoder_normalize_before else None |
|
|
) |
|
|
self.layernorm_embedding = ( |
|
|
nn.LayerNorm(embed_dim) if config.layernorm_embedding else None |
|
|
) |
|
|
|
|
|
self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" |
|
|
self._use_sdpa = config._attn_implementation == "sdpa" |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.Tensor] = None, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
head_mask: Optional[torch.Tensor] = None, |
|
|
inputs_embeds: Optional[torch.Tensor] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned
                tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(input_ids, inputs_embeds)
        embed_pos = embed_pos.to(inputs_embeds.device)

        hidden_states = inputs_embeds + embed_pos
        if self.layernorm_embedding is not None:
            hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)

        if attention_mask is not None:
            if self._use_flash_attention_2:
                # Flash Attention 2 takes the raw 2D mask, and no mask at all when nothing is padded.
                attention_mask = attention_mask if 0 in attention_mask else None
            elif self._use_sdpa and head_mask is None and not output_attentions:
                # SDPA can use its expanded 4D mask only when no head mask or attention outputs are requested.
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
            else:
                # Eager attention: expand (batch_size, seq_len) to a 4D additive mask.
                attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # Check that head_mask was specified for the correct number of layers.
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            # LayerDrop: randomly skip whole layers during training (https://arxiv.org/abs/1909.11556).
            dropout_probability = torch.rand([])
            skip_the_layer = self.training and bool(dropout_probability < self.layerdrop)
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # Under DeepSpeed ZeRO-3 every rank must run the layer to keep parameter gathering in sync.
                if self.gradient_checkpointing and self.training:

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if self.layer_norm is not None:
            hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )
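
# Illustrative sketch, not part of the model: how a 2D padding mask becomes the
# 4D additive mask consumed by the eager-attention path above. It relies on the
# `_prepare_4d_attention_mask` helper already imported by this module; the
# function name and token values here are hypothetical.
def _example_expand_padding_mask():
    mask = torch.tensor([[1, 1, 1, 0]])  # 1 = attend, 0 = padded position
    mask_4d = _prepare_4d_attention_mask(mask, torch.float32)
    # mask_4d has shape (batch, 1, seq_len, seq_len) = (1, 1, 4, 4): zeros where
    # attention is allowed, a large negative value (added to the attention
    # logits) at the padded position.
    return mask_4d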
|
|
class IndicTransDecoder(IndicTransPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`IndicTransDecoderLayer`].

    Args:
        config: IndicTransConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: IndicTransConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop

        # NOTE: this reuses the encoder width, which assumes
        # config.encoder_embed_dim == config.decoder_embed_dim (the lm_head in
        # IndicTransForConditionalGeneration consumes decoder_embed_dim).
        embed_dim = config.encoder_embed_dim
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        self.embed_tokens = nn.Embedding(config.decoder_vocab_size, embed_dim, self.padding_idx)

        if embed_tokens is not None:
            self.embed_tokens.weight = embed_tokens.weight

        self.embed_positions = IndicTransSinusoidalPositionalEmbedding(
            config.max_target_positions,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList(
            [IndicTransDecoderLayer(config) for _ in range(config.decoder_layers)]
        )
        self.layer_norm = nn.LayerNorm(embed_dim) if config.decoder_normalize_before else None
        self.layernorm_embedding = nn.LayerNorm(embed_dim) if config.layernorm_embedding else None

        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self._use_sdpa = config._attn_implementation == "sdpa"

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing.
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask
                values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors
                of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors
                of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
                (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
                instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned
                tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # Length of the cached prefix; new positions are offset by this amount.
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        if self._use_flash_attention_2:
            # Flash Attention 2 keeps the 2D mask; drop it entirely when nothing is padded.
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        elif self._use_sdpa and not output_attentions and cross_attn_head_mask is None:
            # SDPA path: build the 4D causal mask, accounting for the cached prefix.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                input_shape,
                inputs_embeds,
                past_key_values_length,
            )
        else:
            # Eager path: 4D causal mask.
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask, input_shape, inputs_embeds, past_key_values_length
            )

        # Expand the encoder padding mask for cross-attention.
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            if self._use_flash_attention_2:
                encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
            elif self._use_sdpa and cross_attn_head_mask is None and not output_attentions:
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    encoder_attention_mask,
                    inputs_embeds.dtype,
                    tgt_len=input_shape[-1],
                )
            else:
                encoder_attention_mask = _prepare_4d_attention_mask(
                    encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
                )

        # Embed positions, offset by the length of the cache.
        positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
        positions = positions.to(inputs_embeds.device)

        hidden_states = inputs_embeds + positions
        if self.layernorm_embedding is not None:
            hidden_states = self.layernorm_embedding(hidden_states)

        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting"
                    " `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        # Check that the head masks were specified for the correct number of layers.
        for attn_mask, mask_name in zip(
            [head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]
        ):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # LayerDrop: randomly skip whole layers during training.
            dropout_probability = torch.rand([])
            skip_the_layer = self.training and bool(dropout_probability < self.layerdrop)
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                past_key_value = past_key_values[idx] if past_key_values is not None else None

                if self.gradient_checkpointing and self.training:

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions, use_cache)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(decoder_layer),
                        hidden_states,
                        attention_mask,
                        encoder_hidden_states,
                        encoder_attention_mask,
                        head_mask[idx] if head_mask is not None else None,
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                        None,
                    )
                else:
                    layer_outputs = decoder_layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        encoder_hidden_states=encoder_hidden_states,
                        encoder_attention_mask=encoder_attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        cross_attn_layer_head_mask=(
                            cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                        ),
                        past_key_value=past_key_value,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                    )

                hidden_states = layer_outputs[0]

            if skip_the_layer:
                continue

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                all_cross_attentions += (layer_outputs[2],)

        if self.layer_norm is not None:
            hidden_states = self.layer_norm(hidden_states)

        # Add hidden states from the last decoder layer.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_cache,
                    all_hidden_states,
                    all_self_attns,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
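
# Illustrative sketch, not part of the model: incremental decoding with
# `past_key_values` as described in the docstring above. `decoder` is any
# already-constructed IndicTransDecoder; the token ids here are hypothetical.
def _example_incremental_decoding(decoder, encoder_hidden_states):
    # First call caches keys/values for the initial target tokens.
    first = decoder(
        input_ids=torch.tensor([[2, 17, 43]]),
        encoder_hidden_states=encoder_hidden_states,
        use_cache=True,
        return_dict=True,
    )
    # Later calls feed only the newest token of shape (batch_size, 1); the cache
    # supplies the earlier positions, so attention still spans the full prefix.
    second = decoder(
        input_ids=torch.tensor([[91]]),
        encoder_hidden_states=encoder_hidden_states,
        past_key_values=first.past_key_values,
        use_cache=True,
        return_dict=True,
    )
    return second.last_hidden_state  # shape: (1, 1, embed_dim)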
|
|
class IndicTransModel(IndicTransPreTrainedModel):
    _tied_weights_keys = None

    def __init__(self, config: IndicTransConfig):
        super().__init__(config)

        self.encoder = IndicTransEncoder(config)
        self.decoder = IndicTransDecoder(config)

        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
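
# Illustrative sketch, not part of the model: `IndicTransModel` encodes the
# source only when `encoder_outputs` is absent, so callers (as generate() does)
# can encode once and reuse the result. The ids below are hypothetical.
def _example_encoder_reuse(model):
    src = torch.tensor([[4, 8, 15, 16]])
    first = model(input_ids=src, decoder_input_ids=torch.tensor([[2]]), return_dict=True)
    # Wrap the cached encoder states; no re-encoding happens on the second call.
    cached = BaseModelOutput(last_hidden_state=first.encoder_last_hidden_state)
    second = model(
        encoder_outputs=cached,
        decoder_input_ids=torch.tensor([[2, 7]]),
        return_dict=True,
    )
    return second.last_hidden_state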
|
|
class IndicTransForConditionalGeneration(IndicTransPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _tied_weights_keys = ["decoder.embed_tokens.weight", "lm_head.weight"]
    _label_smoothing = 0.0

    def __init__(self, config: IndicTransConfig):
        super().__init__(config)
        self.model = IndicTransModel(config)
        # Projection from decoder hidden states to target-vocabulary logits.
        self.lm_head = nn.Linear(config.decoder_embed_dim, config.decoder_vocab_size, bias=False)

        self.post_init()

    def tie_weights(self):
        if self.config.share_decoder_input_output_embed:
            self._tie_or_clone_weights(self.model.decoder.embed_tokens, self.lm_head)

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    def get_input_embeddings(self):
        return self.model.encoder.embed_tokens

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_label_smoothing(self, label_smoothing):
        self._label_smoothing = label_smoothing
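
    # Illustrative note: with smoothing factor eps and vocabulary size V, the
    # loss in forward() targets (1 - eps) * one_hot + eps / V instead of a hard
    # one-hot; e.g. eps = 0.1, V = 10 puts 0.91 on the gold token, 0.01 elsewhere.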
|
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.decoder_vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ...,
            config.decoder_vocab_size]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None:
                # Teacher forcing: build decoder inputs by shifting the labels one position to the right.
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0])

        masked_lm_loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            # Flatten to (batch_size * sequence_length, vocab); positions labeled -100 are ignored.
            masked_lm_loss = F.cross_entropy(
                input=lm_logits.view(-1, self.config.decoder_vocab_size),
                target=labels.view(-1),
                ignore_index=-100,
                label_smoothing=self._label_smoothing,
            )

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # Once the cache is populated, only the most recent token needs to be fed to the decoder.
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_ids": None,  # encoder_outputs is already computed, so input_ids is not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # During beam search, reorder every cached key/value tensor to follow the selected beams.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),
            )
        return reordered_past
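
# Illustrative end-to-end sketch, not part of the model. The checkpoint name,
# tag-prefixed input format, and generation settings below are assumptions for
# illustration only; real IndicTrans2 usage preprocesses text (e.g. with
# IndicProcessor from IndicTransToolkit) before tokenization.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    ckpt = "ai4bharat/indictrans2-en-indic-dist-200M"  # assumed checkpoint
    tokenizer_ = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
    model_ = AutoModelForSeq2SeqLM.from_pretrained(ckpt, trust_remote_code=True)
    batch = tokenizer_("eng_Latn hin_Deva How are you?", return_tensors="pt")
    out = model_.generate(**batch, num_beams=4, max_new_tokens=32)
    print(tokenizer_.batch_decode(out, skip_special_tokens=True))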