hdnh2006 committed on
Commit
d6d0889
1 Parent(s): fd2b689

handler.py added

Files changed (1)
  1. handler.py +45 -0
handler.py ADDED
@@ -0,0 +1,45 @@
+ import torch
+ from transformers import LlamaForCausalLM, LlamaTokenizer, pipeline
+
+ # pick bfloat16 on Ampere or newer GPUs (compute capability 8), float16 otherwise
+ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the tokenizer and the 4-bit quantized model
+         self.tokenizer = LlamaTokenizer.from_pretrained(path)
+         model = LlamaForCausalLM.from_pretrained(path, load_in_4bit=True, device_map=0, torch_dtype=dtype)
+         # create inference pipeline
+         self.pipeline = pipeline("text-generation", model=model, tokenizer=self.tokenizer)
+
+     # def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
+     #     inputs = data.pop("inputs", data)
+     #     parameters = data.pop("parameters", None)
+
+     #     # pass inputs with all kwargs in data
+     #     if parameters is not None:
+     #         prediction = self.pipeline(inputs, **parameters)
+     #     else:
+     #         prediction = self.pipeline(inputs)
+     #     # postprocess the prediction
+     #     return prediction
+
+     def __call__(self, message: str):
+         # generate a single completion for the prompt
+         sequences = self.pipeline(
+             message,
+             do_sample=True,
+             top_k=10,
+             num_return_sequences=1,
+             eos_token_id=self.tokenizer.eos_token_id,
+             max_length=2048,
+         )
+
+         # keep only the newly generated text, dropping the echoed prompt
+         generated_text = sequences[0]['generated_text']
+         response = generated_text[len(message):]
+
+         print("Chatbot:", response.strip())
+
+         return response.strip()
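
For a quick local check, the handler can be exercised directly as below. This is a sketch: the "./model" path is a placeholder for a directory containing the Llama weights, and calling the handler with a raw string bypasses the Inference Endpoints wrapper, which would normally pass a {"inputs": ...} payload as in the commented-out variant above.

from handler import EndpointHandler

# "./model" is a placeholder; point it at a local directory with the model weights
handler = EndpointHandler(path="./model")
reply = handler("Hello! What can you do?")
print(reply)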