nicholasKluge committed on
Commit
5ac5a85
1 Parent(s): b5287cd

Delete toxicity_model.py

Browse files
Files changed (1) hide show
  1. toxicity_model.py +0 -47
toxicity_model.py DELETED
@@ -1,47 +0,0 @@
1
- from transformers import BertPreTrainedModel, BertModel
2
- from .toxicity_model_config import ToxicityModelConfig
3
- import torch
4
-
5
class ToxicityModel(BertPreTrainedModel):
    """
    BERT-based model that assigns a toxicity score to tokenized text.

    A small feed-forward head (Linear -> ReLU -> Linear -> Tanh -> Linear)
    is applied to the [CLS] token representation from the underlying
    ``BertModel``.

    Args:
        config (ToxicityModelConfig): model configuration; must provide
            ``hidden_size``, ``linear_layer`` and ``linear_layer_output``.

    Returns:
        torch.Tensor: raw scores from the final linear layer. In eval mode
        the scores are additionally multiplied by ``alpha`` and clamped to a
        minimum of ``beta``.
    """

    config_class = ToxicityModelConfig

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)

        # Scoring head applied to the pooled [CLS] representation.
        self.cls_layer1 = torch.nn.Linear(config.hidden_size, config.linear_layer)
        self.relu1 = torch.nn.ReLU()
        self.ff1 = torch.nn.Linear(config.linear_layer, config.linear_layer)
        self.tanh1 = torch.nn.Tanh()
        self.ff2 = torch.nn.Linear(config.linear_layer, config.linear_layer_output)

    def forward(self, input_ids, attention_mask, alpha=1, beta=1e-5):
        """
        Score a batch of tokenized inputs.

        Args:
            input_ids (torch.Tensor): token ids for the batch.
            attention_mask (torch.Tensor): attention mask for the batch.
            alpha (float): eval-only scalar multiplier applied to the output.
            beta (float): eval-only lower bound the output is clamped to.

        Returns:
            torch.Tensor: toxicity scores, one row per input sequence.
        """
        encoded = self.bert(input_ids=input_ids, attention_mask=attention_mask)

        # [CLS] token embedding: first position of the last hidden state.
        cls_state = encoded.last_hidden_state[:, 0, :]

        score = self.ff2(
            self.tanh1(self.ff1(self.relu1(self.cls_layer1(cls_state))))
        )

        # Post-processing is applied only at inference time; during training
        # the head's raw output is returned untouched.
        if not self.training:
            # Scale by alpha, then clamp so no score falls below beta.
            score = score * alpha
            score = score.clamp(min=beta)

        return score