ammarnasr committed
Commit 80c2057
1 Parent(s): d4f23f9

Create handler.py

Files changed (1): handler.py +28 -0
handler.py ADDED
@@ -0,0 +1,28 @@
+ from typing import Any, Dict, List
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Prefer bfloat16 on GPUs with compute capability >= 8 (Ampere or newer), float16 otherwise.
+ dtype = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16
+
+
+ class EndpointHandler:
+     def __init__(self, path: str = ""):
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+         self.model = AutoModelForCausalLM.from_pretrained(
+             path, torch_dtype=dtype, trust_remote_code=True, revision="main"
+         )
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = self.model.to(self.device)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         prompt = data["inputs"]
+         # Optional generation parameters; default to a 100-token completion.
+         config = data.pop("config", None) or {"max_new_tokens": 100}
+         input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(self.device)
+         generated_ids = self.model.generate(input_ids, **config)
+         generated_text = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+         return [{"generated_text": generated_text}]
+
+
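For reference, a minimal local sketch of how this handler would be exercised (not part of the commit; the model path and prompt below are placeholder values). Hugging Face Inference Endpoints constructs EndpointHandler once with the repository path and then calls it with the same {"inputs": ..., "config": ...} payload that arrives over HTTP:

from handler import EndpointHandler

# Placeholder path to a local checkout of the model repository.
handler = EndpointHandler(path="./my-model")
payload = {"inputs": "def fibonacci(n):", "config": {"max_new_tokens": 64}}
print(handler(payload))  # -> [{"generated_text": "..."}]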