Pragformer committed
Commit aabb225
1 Parent(s): b9844fe

Upload BERT_Arch

Files changed (3)
  1. model.py +48 -0
  2. model_config.py +13 -0
  3. pytorch_model.bin +1 -1
model.py ADDED
@@ -0,0 +1,48 @@
+ from transformers import AutoModel, AutoConfig
+ import torch.nn as nn
+ from transformers import BertPreTrainedModel, AutoModel, PreTrainedModel
+ from model_config import PragFormerConfig
+
+
+
+ class BERT_Arch(PreTrainedModel): #(BertPreTrainedModel):
+     config_class = PragFormerConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         print(config.bert)
+         self.bert = AutoModel.from_pretrained(config.bert['_name_or_path'])
+
+         # dropout layer
+         self.dropout = nn.Dropout(config.dropout)
+
+         # relu activation function
+         self.relu = nn.ReLU()
+
+         # dense layer 1
+         self.fc1 = nn.Linear(self.config.bert['hidden_size'], config.fc1)
+         # self.fc1 = nn.Linear(768, 512)
+
+         # dense layer 2 (Output layer)
+         self.fc2 = nn.Linear(config.fc1, config.fc2)
+
+         # softmax activation function
+         self.softmax = nn.LogSoftmax(dim = config.softmax_dim)
+
+     # define the forward pass
+     def forward(self, input_ids, attention_mask):
+         # pass the inputs to the model
+         _, cls_hs = self.bert(input_ids, attention_mask = attention_mask, return_dict=False)
+
+         x = self.fc1(cls_hs)
+
+         x = self.relu(x)
+
+         x = self.dropout(x)
+
+         # output layer
+         x = self.fc2(x)
+
+         # apply softmax activation
+         x = self.softmax(x)
+         return x
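For orientation, a minimal usage sketch of BERT_Arch (not part of this commit). The backbone checkpoint, the example input, and the wiring below are assumptions; the only requirement the class itself imposes is that config.bert is a dict containing '_name_or_path' and 'hidden_size'.

    import torch
    from transformers import AutoConfig, AutoTokenizer
    from model import BERT_Arch
    from model_config import PragFormerConfig

    backbone = "bert-base-uncased"  # hypothetical backbone; any BERT-style checkpoint with a pooled output works
    bert_cfg = AutoConfig.from_pretrained(backbone).to_dict()  # dict carries '_name_or_path' and 'hidden_size'

    config = PragFormerConfig(bert=bert_cfg)  # dropout/fc1/fc2/softmax_dim take their defaults
    model = BERT_Arch(config)

    tokenizer = AutoTokenizer.from_pretrained(backbone)
    batch = tokenizer(["for (int i = 0; i < n; i++) a[i] = b[i];"], return_tensors="pt", padding=True)

    with torch.no_grad():
        log_probs = model(batch["input_ids"], batch["attention_mask"])  # shape (1, config.fc2), log-probabilities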
model_config.py ADDED
@@ -0,0 +1,13 @@
+ from transformers import PretrainedConfig
+
+
+ class PragFormerConfig(PretrainedConfig):
+     model_type = "pragformer"
+
+     def __init__(self, bert=None, dropout=0.2, fc1=512, fc2=2, softmax_dim=1, **kwargs):
+         self.bert = bert
+         self.dropout = dropout
+         self.fc1 = fc1
+         self.fc2 = fc2
+         self.softmax_dim = softmax_dim
+         super().__init__(**kwargs)
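Because the config declares model_type = "pragformer", the pair can be registered with the Auto classes so from_pretrained resolves them by type. A sketch under that assumption (registration is not shown in this commit, and the repo id below is hypothetical):

    from transformers import AutoConfig, AutoModel
    from model import BERT_Arch
    from model_config import PragFormerConfig

    AutoConfig.register("pragformer", PragFormerConfig)
    AutoModel.register(PragFormerConfig, BERT_Arch)

    # After registration, loading goes through the usual Auto API:
    # config = AutoConfig.from_pretrained("Pragformer/PragFormer")  # hypothetical repo id
    # model = AutoModel.from_pretrained("Pragformer/PragFormer", config=config)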
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:85cfcfc2fdd093435961cc76ff9af42408d277f034449c900df6461b5e5f2eb3
+ oid sha256:41bd2ef82875fca5466f3b66211eaf9857fb9420d37f9fb904ead5c5a383b356
  size 500230377
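The weights file is stored through Git LFS; the pointer's "oid sha256:" line is the SHA-256 digest of the real binary, so a download can be checked against it. A small sketch, assuming pytorch_model.bin has been materialized locally (e.g. via git lfs pull):

    import hashlib

    EXPECTED = "41bd2ef82875fca5466f3b66211eaf9857fb9420d37f9fb904ead5c5a383b356"  # oid from the new pointer

    def sha256_of(path, chunk_size=1 << 20):
        # stream the file so a ~500 MB checkpoint never sits fully in memory
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    assert sha256_of("pytorch_model.bin") == EXPECTED, "checksum mismatch"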