from transformers import PreTrainedModel, AutoModel
import torch.nn as nn
import torch

from .configuration_deberta_arg_classifier import DebertaConfig


class DebertaArgClassifier(PreTrainedModel):
    config_class = DebertaConfig

    def __init__(self, config):
        super().__init__(config)
        # DeBERTa backbone; the linear head maps the [CLS] embedding to one
        # logit per label, with BCEWithLogitsLoss for multi-label training.
        self.bert = AutoModel.from_pretrained("microsoft/deberta-large")
        self.classifier = nn.Linear(self.bert.config.hidden_size, config.num_labels)
        self.criterion = nn.BCEWithLogitsLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        output = self.bert(input_ids, attention_mask=attention_mask)
        output = self._cls_embeddings(output)
        output_cls = self.classifier(output)
        output = torch.sigmoid(output_cls)
        loss = None
        if labels is not None:
            # BCEWithLogitsLoss applies the sigmoid internally, so it is fed
            # the raw logits, not the sigmoid probabilities returned below.
            loss = self.criterion(output_cls, labels)
        return {"loss": loss, "output": output}

    def _cls_embeddings(self, output):
        '''Returns the embeddings corresponding to the [CLS] token of each text.'''
        last_hidden_state = output[0]
        cls_embeddings = last_hidden_state[:, 0]
        return cls_embeddings
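

# Usage sketch (not part of the original file): how the classifier might be
# invoked once the package is importable. It assumes DebertaConfig accepts a
# num_labels argument and that the tokenizer matching the backbone
# ("microsoft/deberta-large") is used; both are assumptions, so adjust to the
# actual config class in configuration_deberta_arg_classifier.py.
#
#   from transformers import AutoTokenizer
#
#   config = DebertaConfig(num_labels=2)
#   model = DebertaArgClassifier(config)
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-large")
#   batch = tokenizer(["An example argument."], return_tensors="pt",
#                     padding=True, truncation=True)
#   out = model(batch["input_ids"], attention_mask=batch["attention_mask"])
#   probs = out["output"]  # per-label sigmoid probabilities, shape (batch, num_labels)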