Lurunchik committed
Commit e8292ed
1 Parent(s): bf1ebb5

black formatting

Files changed (1):
  nfqa_model.py  +19 -27

nfqa_model.py CHANGED
@@ -1,11 +1,11 @@
-from typing import Sequence, Optional, Union, Tuple
+from typing import Optional, Sequence, Tuple, Union
 
 import torch
 from torch import nn
-from torch.nn import functional, CrossEntropyLoss
+from torch.nn import CrossEntropyLoss, functional
 from transformers import RobertaConfig
 from transformers.modeling_outputs import SequenceClassifierOutput
-from transformers.models.roberta.modeling_roberta import RobertaModel, RobertaPreTrainedModel, RobertaPooler
+from transformers.models.roberta.modeling_roberta import RobertaModel, RobertaPooler, RobertaPreTrainedModel
 
 
 class MishActivation(nn.Module):
@@ -15,28 +15,24 @@ class MishActivation(nn.Module):
 
 class NFQAClassificationHead(nn.Module):
     def __init__(
-        self, input_dim: int, num_labels: int, hidden_dims: Sequence[int] = (768, 512), dropout: float = 0.0,
+        self, input_dim: int, num_labels: int, hidden_dims: Sequence[int] = (768, 512), dropout: float = 0.0,
     ) -> None:
         super().__init__()
 
-        self.linear_layers = nn.Sequential(
-            *(nn.Linear(input_dim, dim) for dim in hidden_dims)
-        )
+        self.linear_layers = nn.Sequential(*(nn.Linear(input_dim, dim) for dim in hidden_dims))
         self.classification_layer = torch.nn.Linear(hidden_dims[-1], num_labels)
         self.activations = [MishActivation()] * len(hidden_dims)
         self.dropouts = [torch.nn.Dropout(p=dropout)] * len(hidden_dims)
 
     def forward(self, inputs: torch.Tensor) -> torch.Tensor:
         output = inputs
-        for layer, activation, dropout in zip(
-            self.linear_layers, self.activations, self.dropouts
-        ):
+        for layer, activation, dropout in zip(self.linear_layers, self.activations, self.dropouts):
             output = dropout(activation(layer(output)))
         return self.classification_layer(output)
 
 
 class RobertaNFQAClassification(RobertaPreTrainedModel):
-    _keys_to_ignore_on_load_missing = [r"position_ids"]
+    _keys_to_ignore_on_load_missing = [r'position_ids']
     _DROPOUT = 0.0
 
     def __init__(self, config: RobertaConfig):
@@ -51,19 +47,18 @@ class RobertaNFQAClassification(RobertaPreTrainedModel):
 
         self.init_weights()
 
-
     def forward(
-        self,
-        input_ids: Optional[torch.LongTensor] = None,
-        attention_mask: Optional[torch.FloatTensor] = None,
-        token_type_ids: Optional[torch.LongTensor] = None,
-        position_ids: Optional[torch.LongTensor] = None,
-        head_mask: Optional[torch.FloatTensor] = None,
-        inputs_embeds: Optional[torch.FloatTensor] = None,
-        labels: Optional[torch.LongTensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
     ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
@@ -98,8 +93,5 @@ class RobertaNFQAClassification(RobertaPreTrainedModel):
             return ((loss,) + output) if loss is not None else output
 
         return SequenceClassifierOutput(
-            loss=loss,
-            logits=logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
+            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
         )
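
The body of `MishActivation` falls between the first two hunks (lines 12-14) and is not shown in this diff. A minimal sketch of what such a module typically contains, assuming it implements the standard Mish formula x * tanh(softplus(x)) using the `functional` import visible in the header; the actual implementation in nfqa_model.py may differ:

import torch
from torch import nn
from torch.nn import functional


class MishActivation(nn.Module):
    # Assumed body: Mish(x) = x * tanh(softplus(x)). The real code sits
    # outside the diff hunks shown above.
    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        return inputs * torch.tanh(functional.softplus(inputs))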
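
A rough usage sketch for the reformatted model, assuming a checkpoint directory containing the usual `config.json` and weights; the path below is a placeholder, not a repository id taken from this commit:

import torch
from transformers import AutoTokenizer

from nfqa_model import RobertaNFQAClassification

checkpoint = "path/to/nfqa-checkpoint"  # placeholder, substitute a real checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = RobertaNFQAClassification.from_pretrained(checkpoint)
model.eval()

# Classify a single question; logits are returned in a
# SequenceClassifierOutput, as in the forward() shown above.
inputs = tokenizer("Why do people enjoy sad music?", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.argmax(dim=-1).item())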