AbidHasan95 committed
Commit
9725a75
1 Parent(s): 33fb070

Upload 5 files

Files changed (5)
  1. config.json +16 -0
  2. handler.py +92 -0
  3. model3.pth +3 -0
  4. test_bert_config.json +21 -0
  5. vocab.txt +0 -0
config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 2048,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
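
This is the shape of a 2-layer, 512-hidden, 8-head BERT encoder. As a minimal sketch (not part of the commit), the file can be loaded with the `transformers` library, assuming it sits in the current working directory:

```python
# Sketch: load config.json and build a randomly initialised BERT backbone
# with the same shape (2 layers, hidden size 512, 8 attention heads).
from transformers import BertConfig, BertModel

config = BertConfig.from_json_file("config.json")
model = BertModel(config)

print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)  # 2 512 8
```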
handler.py ADDED
@@ -0,0 +1,92 @@
+ from typing import Dict, List, Any
+ import torch
+ import torch.nn as nn
+ from transformers import pipeline, BertModel, AutoTokenizer, PretrainedConfig
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # self.pipeline = pipeline("text-classification",model=path)
+         self.model = CustomModel("test_bert_config.json")
+         self.model.load_state_dict(torch.load("model3.pth"))
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         data args:
+             inputs (:obj: `str`)
+             date (:obj: `str`)
+         Return:
+             A :obj:`list` | `dict`: will be serialized and returned
+         """
+         # get inputs
+         inputs = data.pop("inputs", data)
+         # date = data.pop("date", None)
+
+         # check if date exists and if it is a holiday
+         # if date is not None and date in self.holidays:
+         #     return [{"label": "happy", "score": 1}]
+
+
+         # run normal prediction
+         prediction = self.model.classify(inputs)
+         return prediction
+
+ class CustomModel(nn.Module):
+     def __init__(self, bert_config):
+         super(CustomModel, self).__init__()
+         # self.bert = BertModel.from_pretrained(base_model_path)
+         self.bert = BertModel._from_config(PretrainedConfig.from_json_file(bert_config))
+         self.dropout = nn.Dropout(0.2)
+         self.token_classifier = nn.Linear(self.bert.config.hidden_size, 16)
+         self.sequence_classifier = nn.Linear(self.bert.config.hidden_size, 7)
+
+         # Initialize weights
+         nn.init.kaiming_normal_(self.token_classifier.weight, mode='fan_in', nonlinearity='linear')
+         nn.init.kaiming_normal_(self.sequence_classifier.weight, mode='fan_in', nonlinearity='linear')
+         self.seq_labels = [
+             "Transaction",
+             "Courier",
+             "OTP",
+             "Expiry",
+             "Misc",
+             "Tele Marketing",
+             "Spam",
+         ]
+
+         self.token_class_labels = [
+             'O',
+             'Courier Service',
+             'Credit',
+             'Date',
+             'Debit',
+             'Email',
+             'Expiry',
+             'Item',
+             'Order ID',
+             'Organization',
+             'OTP',
+             'Phone Number',
+             'Refund',
+             'Time',
+             'Tracking ID',
+             'URL',
+         ]
+         base_model_path = '.'
+         self.tokenizer = AutoTokenizer.from_pretrained(base_model_path)
+
+     def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor):
+         outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
+         sequence_output, pooled_output = outputs.last_hidden_state, outputs.pooler_output
+
+         token_logits = self.token_classifier(self.dropout(sequence_output))
+         sequence_logits = self.sequence_classifier(self.dropout(pooled_output))
+
+         return token_logits, sequence_logits
+     def classify(self, inputs):
+         out = self.tokenizer(inputs, return_tensors="pt")
+         token_classification_logits, sequence_logits = self.forward(**out)
+         token_classification_logits = token_classification_logits.argmax(2)[0]
+         sequence_logits = sequence_logits.argmax(1)[0]
+         token_classification_out = [self.token_class_labels[i] for i in token_classification_logits.tolist()]
+         seq_classification_out = self.seq_labels[sequence_logits]
+         # return token_classification_out, seq_classification_out
+         return {"token_classfier": token_classification_out, "sequence_classfier": seq_classification_out}
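
handler.py follows the custom-handler convention for Hugging Face Inference Endpoints: the service instantiates `EndpointHandler` once and then calls it with each request payload. A minimal local smoke test (not part of the commit), assuming handler.py, model3.pth, test_bert_config.json, config.json and vocab.txt are all in the working directory; the SMS text is a hypothetical example:

```python
# Sketch: exercise the handler locally the same way the endpoint would.
from handler import EndpointHandler

handler = EndpointHandler(path=".")
payload = {"inputs": "Your OTP for the transaction is 482913"}  # hypothetical input
result = handler(payload)

print(result["token_classfier"])     # per-token labels (key spelling as in handler.py)
print(result["sequence_classfier"])  # one of the seven sequence labels
```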
model3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae032414deee1ccbe1fb94a851f487c1ee0a39649d69c883a4814dd28f01cedc
+ size 89896678
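
model3.pth is stored via Git LFS, so the repository itself only carries the pointer above; the actual weights are fetched on checkout (e.g. with `git lfs pull`). A small sketch (not part of the commit) that checks a local copy against the size and sha256 recorded in the pointer:

```python
# Sketch: verify model3.pth matches the Git LFS pointer. If it does not,
# the file is likely still the pointer text and `git lfs pull` is needed.
import hashlib
import os

EXPECTED_SIZE = 89896678
EXPECTED_SHA256 = "ae032414deee1ccbe1fb94a851f487c1ee0a39649d69c883a4814dd28f01cedc"

assert os.path.getsize("model3.pth") == EXPECTED_SIZE, "size mismatch; run `git lfs pull`"
with open("model3.pth", "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == EXPECTED_SHA256, "hash mismatch"
print("model3.pth matches its LFS pointer")
```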
test_bert_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "_name_or_path": "bert/bert_uncased_L-2_H-512_A-8/",
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 2048,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.38.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
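
This is the config that `CustomModel.__init__` in handler.py actually reads; the `_name_or_path` points at the 2-layer, 512-hidden, 8-head BERT miniature used as the base. A minimal sketch (not part of the commit) of building the backbone the same way the handler does and sanity-checking its size against model3.pth:

```python
# Sketch: construct the BERT backbone from this JSON, mirroring the call
# made in CustomModel.__init__, and count its parameters.
from transformers import BertModel, PretrainedConfig

config = PretrainedConfig.from_json_file("test_bert_config.json")
bert = BertModel._from_config(config)  # same call used in handler.py

n_params = sum(p.numel() for p in bert.parameters())
print(f"{n_params / 1e6:.1f}M parameters")
# Roughly 22-23M float32 parameters, which is consistent with the ~90 MB model3.pth above.
```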
vocab.txt ADDED
The diff for this file is too large to render. See raw diff