from typing import Dict, List, Any

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline


class EndpointHandler():
    def __init__(self, path=""):
        # Load the tokenizer and the model. GPT-NeoXT-Chat-Base-20B is a causal
        # language model, so AutoModelForCausalLM is the right auto class:
        # AutoModelForQuestionAnswering throws an error because the checkpoint
        # has no question-answering head, and plain AutoModel would return the
        # bare transformer without a generation head.
        tokenizer = AutoTokenizer.from_pretrained("verseAI/vai-GPT-NeoXT-Chat-Base-20B")
        model = AutoModelForCausalLM.from_pretrained(
            "verseAI/vai-GPT-NeoXT-Chat-Base-20B",
            device_map="auto",
            load_in_8bit=True,  # requires the bitsandbytes package
        )

        # Create the inference pipeline.
        self.pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data: a dict with keys:
                inputs (:obj:`str`): the prompt to generate from
                parameters (:obj:`dict`, optional): generation kwargs forwarded
                    to the pipeline

        Return:
            A :obj:`list` | `dict` that will be serialized and returned.
        """
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)

        # Pass the inputs, with any remaining kwargs, to the pipeline.
        if parameters is not None:
            prediction = self.pipeline(inputs, **parameters)
        else:
            prediction = self.pipeline(inputs)

        # Postprocess the prediction.
        return prediction

        # Alternative: call the model directly instead of going through the
        # pipeline (this model's chat format uses "<human>:" / "<bot>:" turns):
        #   inputs = self.pipeline.tokenizer("<human>: Hello!\n<bot>:", return_tensors="pt").to(self.pipeline.model.device)
        #   outputs = self.pipeline.model.generate(**inputs, max_new_tokens=10, do_sample=True, temperature=0.8)
        #   return {"generated_text": self.pipeline.tokenizer.decode(outputs[0])}
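
# --- Usage sketch (not part of the handler contract) ------------------------
# A minimal local smoke test, assuming the checkpoint above is accessible and
# there is enough GPU memory for the 8-bit 20B model. On Hugging Face
# Inference Endpoints the toolkit instantiates EndpointHandler itself and
# calls it with the request payload; this mirrors that call locally. The
# prompt format and parameter values here are illustrative, not prescribed.
if __name__ == "__main__":
    handler = EndpointHandler()
    payload = {
        "inputs": "<human>: Hello!\n<bot>:",
        "parameters": {"max_new_tokens": 32, "do_sample": True, "temperature": 0.8},
    }
    # Returns a list like [{"generated_text": "..."}] from the pipeline.
    print(handler(payload))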