import math from typing import Dict, List, Optional, Set, Tuple, Union # import numpy as np import torch from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import get_activation from transformers.configuration_utils import PretrainedConfig # from transformers.deepspeed import is_deepspeed_zero3_enabled from transformers.modeling_outputs import ( BaseModelOutput, MaskedLMOutput, # MultipleChoiceModelOutput, # QuestionAnsweringModelOutput, SequenceClassifierOutput, # TokenClassifierOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers.models.distilbert.modeling_distilbert import ( create_sinusoidal_embeddings, DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING, ) from transformers.pytorch_utils import ( apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, # replace_return_docstrings, ) from .configuration_lddbert import LddBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "lddbert" _CONFIG_FOR_DOC = "LddBertConfig" _TOKENIZER_FOR_DOC = "LddBertTokenizer" class Embeddings(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight ) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) if version.parse(torch.__version__) > version.parse("1.6.0"): self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: torch.Tensor, token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Parameters: input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ input_shape = input_ids.size() seq_length = input_shape[1] if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if hasattr(self, "position_ids"): position_ids = self.position_ids[:, :seq_length] else: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) token_type_embeddings = self.token_type_embeddings(token_type_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings + token_type_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads: Set[int] = set() def prune_heads(self, heads: List[int]): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads) # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, ...]: """ Parameters: query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Returns: weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True` """ bs, q_length, dim = query.size() k_length = key.size(1) # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x: torch.Tensor) -> torch.Tensor: """separate heads""" return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x: torch.Tensor) -> torch.Tensor: """group heads""" return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length) mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length) scores = scores.masked_fill(mask, -float("inf")) # (bs, n_heads, q_length, k_length) weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length) weights = self.dropout(weights) # (bs, n_heads, q_length, k_length) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if output_attentions: return (context, weights) else: return (context,) class FFN(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.dropout = nn.Dropout(p=config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim) self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim) self.activation = get_activation(config.activation) def forward(self, input: torch.Tensor) -> torch.Tensor: return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input) def ff_chunk(self, input: torch.Tensor) -> torch.Tensor: x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerBlock(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() assert config.dim % config.n_heads == 0 self.attention = MultiHeadSelfAttention(config) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) def forward( self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, ...]: """ Parameters: x: torch.tensor(bs, seq_length, dim) attn_mask: torch.tensor(bs, seq_length) Returns: sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization. """ # Self-Attention sa_output = self.attention( query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples assert type(sa_output) == tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) # Feed Forward Network ffn_output = self.ffn(sa_output) # (bs, seq_length, dim) ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output class Transformer(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.n_layers = config.n_layers self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)]) def forward( self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore """ Parameters: x: torch.tensor(bs, seq_length, dim) Input sequence embedded. attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence. Returns: hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)] Tuple of length n_layers with the hidden states from each layer. Optional: only if output_hidden_states=True all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)] Tuple of length n_layers with the attention weights from each layer Optional: only if output_attentions=True """ all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_state = x for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) layer_outputs = layer_module( x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[i], output_attentions=output_attentions ) hidden_state = layer_outputs[-1] if output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1 # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions ) class LddBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LddBertConfig load_tf_weights = None base_model_prefix = "lddbert" def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_( mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_( mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) LDDBERT_START_DOCSTRING = DISTILBERT_START_DOCSTRING LDDBERT_INPUTS_DOCSTRING = DISTILBERT_INPUTS_DOCSTRING @add_start_docstrings( "The bare LddBERT encoder/transformer outputting raw hidden-states without any specific head on top.", LDDBERT_START_DOCSTRING, ) class LddBertModel(LddBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) assert config.cnn_kernel_size%2 == 1 self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder self.gru = nn.GRU(config.dim , config.dim//2, config.n_gru_layers, batch_first=True, bidirectional=True) self.activation_cnn = get_activation('relu') self.cnn = nn.ModuleList([ nn.Sequential( nn.Conv2d(in_channels=1, out_channels=1, kernel_size=config.cnn_kernel_size, padding=(config.cnn_kernel_size-1)//2), self.activation_cnn ) for _ in range(config.n_cnn_layers) ]) # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: """ Returns the position embeddings """ return self.embeddings.position_embeddings def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embedding matrix. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings # no resizing needs to be done if the length stays the same if num_position_embeds_diff == 0: return logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") self.config.max_position_embeddings = new_num_position_embeddings old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone() self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim) if self.config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.position_embeddings.weight ) else: with torch.no_grad(): if num_position_embeds_diff > 0: self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter( old_position_embeddings_weight ) else: self.embeddings.position_embeddings.weight = nn.Parameter( old_position_embeddings_weight[:num_position_embeds_diff] ) # move position_embeddings to correct device self.embeddings.position_embeddings.to(self.device) def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length) # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.embeddings( input_ids=input_ids, token_type_ids=token_type_ids, ) # (bs, seq_length, dim) bert_output = self.transformer( x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) gru_output, _ = self.gru(bert_output[0]) cnn_output = bert_output[0].view(input_shape[0], 1, input_shape[1], -1) for i, layer_module in enumerate(self.cnn): cnn_output = layer_module(cnn_output) cnn_output = cnn_output.view(input_shape[0], input_shape[1], -1) output = gru_output + cnn_output if not return_dict: return (output, ) + bert_output[1:] return BaseModelOutput( last_hidden_state=output, hidden_states=bert_output.hidden_states, attentions=bert_output.attentions, ) @add_start_docstrings( """LddBert Model with a `masked language modeling` head on top.""", LDDBERT_START_DOCSTRING, ) class LddBertForMaskedLM(LddBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.activation = get_activation(config.activation) self.lddbert = LddBertModel(config) self.vocab_transform = nn.Linear(config.dim, config.dim) self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) # Initialize weights and apply final processing self.post_init() self.mlm_loss_fct = nn.CrossEntropyLoss() def get_position_embeddings(self) -> nn.Embedding: """ Returns the position embeddings """ return self.lddbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embedding matrix. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ self.lddbert.resize_position_embeddings(new_num_position_embeddings) def get_output_embeddings(self) -> nn.Module: return self.vocab_projector def set_output_embeddings(self, new_embeddings: nn.Module): self.vocab_projector = new_embeddings @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict lddbert_output = self.lddbert( input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = lddbert_output[0] # (bs, seq_length, dim) prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim) prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim) prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size) mlm_loss = None if labels is not None: mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1)) if not return_dict: output = (prediction_logits,) + lddbert_output[1:] return ((mlm_loss,) + output) if mlm_loss is not None else output return MaskedLMOutput( loss=mlm_loss, logits=prediction_logits, hidden_states=lddbert_output.hidden_states, attentions=lddbert_output.attentions, ) @add_start_docstrings( """ LddBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LDDBERT_START_DOCSTRING, ) class LddBertForSequenceClassification(LddBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.lddbert = LddBertModel(config) self.pre_classifier = nn.Linear(config.dim, config.dim) self.activation = get_activation(config.activation) self.dropout = nn.Dropout(config.seq_classif_dropout) self.classifier = nn.Linear(config.dim, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_position_embeddings(self) -> nn.Embedding: """Returns the position embeddings""" return self.lddbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embedding matrix. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ self.lddbert.resize_position_embeddings(new_num_position_embeddings) @add_start_docstrings_to_model_forward(LDDBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict lddbert_output = self.lddbert( input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = lddbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) pooled_output = self.activation(pooled_output) # (bs, dim) pooled_output = self.dropout(pooled_output) # (bs, dim) logits = self.classifier(pooled_output) # (bs, num_labels) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + lddbert_output[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=lddbert_output.hidden_states, attentions=lddbert_output.attentions, )