matthewkenney committed
Commit: 509f4a0
Parent: f5cce54

Create handler.py

Files changed (1)
  1. handler.py +22 -0
handler.py ADDED
from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

class EndpointHandler:
    def __init__(self, path=""):
        self.base_model = path

        # 4-bit NF4 quantization with double quantization; compute runs in float16.
        bitsandbytes = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            self.base_model,
            device_map={"": 0},  # place the whole model on GPU 0
            quantization_config=bitsandbytes,
            trust_remote_code=True,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.base_model, trust_remote_code=True)
        # The tokenizer has no pad token; reuse EOS so padding works.
        self.tokenizer.pad_token = self.tokenizer.eos_token

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        inputs = data.pop("inputs", data)
        # Alpaca-style instruction prompt.
        prompt = (
            "Below is an instruction that describes a task. "
            "Write a response that appropriately completes the request. "
            f"### Instruction: {inputs} ### Response:"
        )
        model_inputs = self.tokenizer([prompt], return_tensors="pt", padding=True).to("cuda")
        # max_length counts prompt tokens too, capping prompt + answer at 200 tokens.
        generated_ids = self.model.generate(**model_inputs, max_length=200)
        output = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        # The decoded text echoes the prompt: keep only what follows "### Response:"
        # and drop anything after a subsequent "###" marker.
        answer_without_prompt = output[0].split("### Response:")[1].strip()
        prediction = answer_without_prompt.split("###")[0].strip()
        return [{"generated_text": prediction}]
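
For reference, a minimal local smoke test of the new handler might look like the sketch below. The model path and the example instruction are illustrative, and it assumes a CUDA GPU with the bitsandbytes package installed, since the handler loads the model in 4-bit on cuda:0.

from handler import EndpointHandler

# "." is a placeholder for the repository directory holding the model weights.
handler = EndpointHandler(path=".")
result = handler({"inputs": "Summarize why the sky appears blue."})
print(result)  # e.g. [{"generated_text": "..."}]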