Frederic Marvin Abraham committed on
Commit
35f403b
1 Parent(s): 4772b8b

update handler

Files changed (2)
  1. .gitattributes +0 -2
  2. handler.py +2 -1
.gitattributes CHANGED
@@ -17,7 +17,6 @@ model.safetensors filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
@@ -26,7 +25,6 @@ model.safetensors filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
handler.py CHANGED
@@ -5,7 +5,8 @@ from transformers import BertModel, BertTokenizerFast
 
 
 class EndpointHandler():
-    def __init__(self, path_to_model: str):
+
+    def __init__(self, path_to_model: str = '.'):
         # Preload all the elements you are going to need at inference.
         # pseudo:
         self.tokenizer = BertTokenizerFast.from_pretrained(path_to_model)
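For context, handler.py appears to follow the Hugging Face Inference Endpoints custom-handler convention: a class whose __init__ preloads everything from a model path and whose __call__ handles the request payload. The sketch below shows roughly how the full class might look after this commit. Only the __init__ signature and the tokenizer line come from the diff; the model loading, the __call__ body, and the mean-pooling step are illustrative assumptions, not the repository's actual code.

from typing import Any, Dict, List

import torch
from transformers import BertModel, BertTokenizerFast


class EndpointHandler():

    def __init__(self, path_to_model: str = '.'):
        # Preload all the elements you are going to need at inference,
        # so each request only runs the forward pass.
        self.tokenizer = BertTokenizerFast.from_pretrained(path_to_model)
        self.model = BertModel.from_pretrained(path_to_model)  # assumption: model loaded from the same path
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> List[List[float]]:
        # "inputs" is the conventional request key for Inference Endpoints payloads.
        inputs = data.get("inputs", "")
        if isinstance(inputs, str):
            inputs = [inputs]

        encoded = self.tokenizer(
            inputs, padding=True, truncation=True, return_tensors="pt"
        )
        with torch.no_grad():
            outputs = self.model(**encoded)

        # Mean-pool the last hidden state into one embedding per input text
        # (an assumption; the repository's actual post-processing is not shown).
        mask = encoded["attention_mask"].unsqueeze(-1)
        pooled = (outputs.last_hidden_state * mask).sum(1) / mask.sum(1)
        return pooled.tolist()

The default path_to_model = '.' added in this commit matches how Inference Endpoints instantiate the handler from the repository root, so the class can be constructed without arguments when deployed.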