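"""Custom EndpointHandler for a text-generation inference endpoint.

Loads a causal language model with 4-bit NF4 quantization (bitsandbytes) and
serves Alpaca-style instruction prompts, returning only the generated
response text.
"""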
from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


class EndpointHandler:
    def __init__(self, path: str = ""):
        self.base_model = path

        # Quantize the model to 4-bit NF4 with double quantization to cut GPU memory use.
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            self.base_model,
            device_map={"": 0},  # place the whole model on GPU 0
            quantization_config=bnb_config,
            trust_remote_code=True,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.base_model, trust_remote_code=True)
        # The tokenizer may not define a pad token; reuse the EOS token so padding works.
        self.tokenizer.pad_token = self.tokenizer.eos_token

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        inputs = data.pop("inputs", data)

        # Wrap the raw instruction in the Alpaca-style template used at fine-tuning time.
        prompt = (
            "Below is an instruction that describes a task. "
            "Write a response that appropriately completes the request. "
            f"### Instruction: {inputs} ### Response:"
        )

        model_inputs = self.tokenizer([prompt], return_tensors="pt", padding=True).to("cuda")
        # max_length caps prompt plus generated tokens at 200 in total.
        generated_ids = self.model.generate(**model_inputs, max_length=200)
        output = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

        # Keep only the text between "### Response:" and any following "###" marker,
        # dropping the echoed prompt.
        answer_without_prompt = output[0].split("### Response:")[1].strip()
        prediction = answer_without_prompt.split("###")[0].strip()

        return [{"generated_text": prediction}]
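

# A minimal local smoke test, assuming a CUDA GPU and the bitsandbytes package
# are available; "my-org/my-finetuned-model" is a placeholder model id, not a
# real one. It mirrors how the handler is meant to be used: construct it once
# with a model path, then call it with a {"inputs": ...} payload.
if __name__ == "__main__":
    handler = EndpointHandler(path="my-org/my-finetuned-model")
    result = handler({"inputs": "Summarize what 4-bit NF4 quantization does."})
    print(result)  # e.g. [{"generated_text": "..."}]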