yutingg committed
Commit b975363 · 1 Parent(s): c8b2943

Upload DistillBERTClassClarity

config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "architectures": [
+    "DistillBERTClassClarity"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_essay_clarity.DistillBERTClassClarityConfig",
+    "AutoModel": "modeling_essay_clarity.DistillBERTClassClarity"
+  },
+  "model_type": "distill_bert_clarity",
+  "torch_dtype": "float32",
+  "transformers_version": "4.24.0"
+}
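The auto_map above wires AutoConfig and AutoModel to the custom classes shipped in this repo, so the model is intended to be loaded with trust_remote_code=True. A minimal loading sketch follows; the repo id "yutingg/essay-clarity" is a placeholder, not confirmed by this commit.

from transformers import AutoConfig, AutoModel

repo_id = "yutingg/essay-clarity"  # hypothetical repo id; replace with the actual Hub repo
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # resolved via auto_map to DistillBERTClassClarityConfig
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)    # resolved via auto_map to DistillBERTClassClarity
print(type(model).__name__)  # expected: DistillBERTClassClarity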
configuration_essay_clarity.py ADDED
@@ -0,0 +1,11 @@
+from transformers import PretrainedConfig
+
+
+class DistillBERTClassClarityConfig(PretrainedConfig):
+    model_type = "distill_bert_clarity"
+
+    def __init__(
+        self,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
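The config class only declares model_type and forwards everything else to PretrainedConfig. A quick local sketch, assuming the file above is importable from the working directory:

from configuration_essay_clarity import DistillBERTClassClarityConfig

config = DistillBERTClassClarityConfig()
print(config.model_type)  # distill_bert_clarity
config.save_pretrained("./essay-clarity")  # writes a config.json carrying model_type; "./essay-clarity" is an arbitrary path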
modeling_essay_clarity.py ADDED
@@ -0,0 +1,32 @@
+# Customized model: a dropout and a dense layer on top of DistilBERT produce the final output.
+# Importing the libraries needed
+import torch
+import transformers
+from torch.utils.data import Dataset, DataLoader
+from transformers import DistilBertModel, DistilBertTokenizer, PreTrainedModel
+from configuration_essay_clarity import DistillBERTClassClarityConfig
+
+
+class DistillBERTClassClarity(PreTrainedModel):
+    config_class = DistillBERTClassClarityConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
+        self.pre_classifier = torch.nn.Linear(768, 768)
+        self.dropout = torch.nn.Dropout(0.3)
+        self.classifier = torch.nn.Linear(768, 1)
+        # https://glassboxmedicine.com/2019/05/26/classification-sigmoid-vs-softmax/
+        # self.softmax = torch.nn.Softmax(dim=1)
+        # self.sigmoid = torch.nn.Sigmoid()  # apply sigmoid on the output logit
+
+    def forward(self, ids=None, mask=None):
+        output_1 = self.l1(input_ids=ids, attention_mask=mask)
+        hidden_state = output_1[0]
+        pooler = hidden_state[:, 0]  # take the [CLS] token representation
+        pooler = self.pre_classifier(pooler)
+        pooler = torch.nn.ReLU()(pooler)
+        pooler = self.dropout(pooler)
+        output = self.classifier(pooler)
+        # output = self.sigmoid(output)
+        return output
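forward() takes token ids and an attention mask, pools the [CLS] hidden state, and returns one raw logit per example; the sigmoid is commented out, so it has to be applied outside the model. An inference sketch under the assumption that both .py files are importable locally (note that instantiating the class this way pulls distilbert-base-uncased weights, not the fine-tuned pytorch_model.bin uploaded below):

import torch
from transformers import DistilBertTokenizer
from configuration_essay_clarity import DistillBERTClassClarityConfig
from modeling_essay_clarity import DistillBERTClassClarity

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistillBERTClassClarity(DistillBERTClassClarityConfig())
model.eval()

enc = tokenizer("An example essay sentence.", return_tensors="pt")
with torch.no_grad():
    logit = model(ids=enc["input_ids"], mask=enc["attention_mask"])  # shape [1, 1]
score = torch.sigmoid(logit)  # clarity score in (0, 1), applied outside forward()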
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50b5288e6f41cfb080d462347bd811f822d491995317cb24d5f90e1b00767a9d
+size 267850221