from typing import Dict, Any, List
from transformers import pipeline
import torch


class EndpointHandler:
    def __init__(self, path=""):
        # load the text-generation pipeline once at startup; the model id is
        # hardcoded here rather than read from `path`
        self.pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (:obj:`dict`):
                The payload with the text prompt and generation parameters.
        """
        # process input
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)
        # a list of dicts is a single conversation; a list of lists is already
        # a batch of conversations
        if isinstance(inputs, list) and (isinstance(inputs[0], list) or isinstance(inputs[0], dict)):
            if isinstance(inputs[0], dict):
                inputs = [inputs]
            messages = inputs
        else:
            if isinstance(inputs, str):
                messages = [[
                    {
                        "role": "system",
                        "content": "You are a helpful AI assistant",
                    },
                    {"role": "user", "content": inputs},
                ]]
            else:
                messages = [[
                    {
                        "role": "system",
                        "content": "You are a helpful AI assistant",
                    },
                    {"role": "user", "content": text},
                ] for text in inputs]
        prompts = []
        for message in messages:
            prompts.append(self.pipe.tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True))
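        # each rendered prompt follows the model's chat template; for
        # zephyr-7b-beta this looks roughly like (illustrative):
        #   <|system|>\n...</s>\n<|user|>\n...</s>\n<|assistant|>\n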
        # pass inputs with all kwargs in data
        if parameters is not None:
            # make the endpoint compatible with the hf client library by
            # dropping parameters the pipeline does not accept
            exclude_list = ["stop", "watermark", "details", "decoder_input_details"]
            parameters = {name: val for name, val in parameters.items() if name not in exclude_list}
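            # e.g. {"max_new_tokens": 64, "stop": ["</s>"]} is filtered down to
            # {"max_new_tokens": 64} before being passed to the pipeline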
            outputs = self.pipe(prompts, **parameters)
        else:
            outputs = self.pipe(prompts, max_new_tokens=32)
        return [{"generated_text": outputs}]
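

# Minimal local smoke test (a sketch, not part of the deployed handler; assumes
# enough memory to load zephyr-7b-beta and that this file is saved as handler.py):
if __name__ == "__main__":
    handler = EndpointHandler()
    result = handler({"inputs": "What is the capital of France?", "parameters": {"max_new_tokens": 16}})
    print(result)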