GradApplicationDocuments committed
Commit 7dfc7aa
Parent: 286ab7c

Create custom_models.py

Files changed (1): custom_models.py (+87 −0)
custom_models.py ADDED
@@ -0,0 +1,87 @@
from typing import Optional
from transformers import PreTrainedModel, PretrainedConfig, DistilBertModel, BertModel
import torch
from torch import nn


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class TransformerBasedModelDistilBert(nn.Module):
    def __init__(self):
        super(TransformerBasedModelDistilBert, self).__init__()
        self.bert = DistilBertModel.from_pretrained('distilbert-base-uncased')
        self.dropout = nn.Dropout(0.55)
        self.fc = nn.Linear(768, 2)

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        input_shape = input_ids.size()
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # DistilBERT has no pooler; use the hidden state of the [CLS] token.
        pooled_output = outputs.last_hidden_state[:, 0, :]
        pooled_output = self.dropout(pooled_output)
        logits = self.fc(pooled_output)
        return logits


class TransformerBasedModelBert(nn.Module):
    def __init__(self):
        super(TransformerBasedModelBert, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.dropout = nn.Dropout(0.55)
        self.fc = nn.Linear(768, 2)

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        input_shape = input_ids.size()
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # BertModel returns (last_hidden_state, pooler_output, ...); index 1 is the pooled [CLS] output.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.fc(pooled_output)
        return logits


# Note: the wrapped models above hard-code Dropout(0.55) rather than reading
# config.final_dropout from these configs.
class MyConfigDistil(PretrainedConfig):
    model_type = "distilbert"

    def __init__(self, final_dropout=0.55, **kwargs):
        super().__init__(**kwargs)
        self.final_dropout = final_dropout


class MyConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, final_dropout=0.55, **kwargs):
        super().__init__(**kwargs)
        self.final_dropout = final_dropout


class MyHFModel_DistilBertBased(PreTrainedModel):
    config_class = MyConfigDistil

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.model = TransformerBasedModelDistilBert()

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        input_shape = input_ids.size()
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        return self.model(input_ids=input_ids, attention_mask=attention_mask)


class MyHFModel_BertBased(PreTrainedModel):
    config_class = MyConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.model = TransformerBasedModelBert()

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        input_shape = input_ids.size()
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        return self.model(input_ids=input_ids, attention_mask=attention_mask)


# Module-level instances exported for the app.
config = MyConfigDistil(0.55)
HF_DistilBertBasedModelAppDocs = MyHFModel_DistilBertBased(config)

config_db = MyConfig(0.55)
HF_BertBasedModelAppDocs = MyHFModel_BertBased(config_db)
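
A minimal usage sketch, not part of the commit: it shows how the exported HF_DistilBertBasedModelAppDocs instance could be called for two-class inference. The AutoTokenizer call is the standard transformers API; the example sentence is hypothetical.

from transformers import AutoTokenizer
import torch.nn.functional as F

tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')

# Hypothetical input text; any short string works.
text = "Statement of purpose for the graduate program."
encoded = tokenizer(text, return_tensors='pt')

model = HF_DistilBertBasedModelAppDocs.to(device)
model.eval()
with torch.no_grad():
    logits = model(input_ids=encoded['input_ids'].to(device),
                   attention_mask=encoded['attention_mask'].to(device))
probs = F.softmax(logits, dim=-1)  # shape (1, 2): probabilities for the two classes

Because the wrappers subclass PreTrainedModel and declare a config_class, they also gain save_pretrained / from_pretrained support, which is presumably the reason for wrapping the plain nn.Module models this way.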