import subprocess
import sys
from typing import Any, Dict

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
class EndpointHandler:
    def __init__(self, path=""):
        # install runtime dependencies required by the model's remote code
        subprocess.check_call([sys.executable, "-m", "pip", "install", "flash-attn"])
        subprocess.check_call([sys.executable, "-m", "pip", "install", "tiktoken"])
        subprocess.check_call([sys.executable, "-m", "pip", "install", "pytest"])
        # load the tokenizer and model from the repository path
        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            path, device_map="auto", torch_dtype="auto", trust_remote_code=True
        )
        # create the text-generation inference pipeline
        self.pipeline = pipeline(
            "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=4096
        )
    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)
        # wrap the raw input in a chat-style prompt for translation
        messages = [
            {"role": "system", "content": "Translate the text to English. Rephrase if needed to ensure it sounds natural. Output only the translated text."},
            {"role": "user", "content": inputs},
        ]
        # forward any generation parameters supplied in the request
        if parameters is not None:
            prediction = self.pipeline(messages, **parameters)
        else:
            prediction = self.pipeline(messages)
        # postprocess: with chat input, generated_text is the full message list,
        # so the last entry is the assistant's reply
        assistant_output = prediction[0]["generated_text"][-1]
        return {"output": assistant_output["content"]}
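

# Local smoke test: a minimal sketch, assuming a model repository is available
# at MODEL_PATH (hypothetical path, not part of the original handler) and that
# requests mirror the Inference Endpoints payload format, i.e. an "inputs"
# string plus an optional "parameters" dict of generation kwargs.
if __name__ == "__main__":
    MODEL_PATH = "."  # hypothetical: directory containing the model files
    handler = EndpointHandler(path=MODEL_PATH)
    result = handler({
        "inputs": "Ciao, come stai?",
        "parameters": {"do_sample": False},
    })
    print(result["output"])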