from typing import Any, Dict

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the base GPT-J-6B tokenizer and the PEFT adapter checkpoint.
        self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")
        self.model = AutoPeftModelForCausalLM.from_pretrained("ksee/testgptj")
        self.model.to(device)
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data (Dict): The payload with the text prompt and generation parameters.
        """
        # Get inputs
        prompt = data.pop("inputs", None)
        parameters = data.pop("parameters", None)
        if prompt is None:
            raise ValueError("Missing prompt.")

        # Preprocess
        input_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"].to(device)

        # Forward: honor caller-supplied generation parameters; otherwise fall
        # back to sampling defaults (temperature only takes effect with
        # do_sample=True).
        if parameters is not None:
            output = self.model.generate(input_ids=input_ids, **parameters)
        else:
            output = self.model.generate(
                input_ids=input_ids, do_sample=True, temperature=0.9, max_new_tokens=50
            )

        # Postprocess
        prediction = self.tokenizer.batch_decode(
            output.detach().cpu().numpy(), skip_special_tokens=True
        )[0]
        return {"generated_text": prediction}
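
# Minimal local smoke test: a sketch assuming the adapter repo "ksee/testgptj"
# is reachable and there is enough memory to load GPT-J-6B. Inside the
# Inference Endpoints runtime the handler is instantiated for you, so this
# block only runs when the file is executed directly.
if __name__ == "__main__":
    handler = EndpointHandler()
    result = handler({"inputs": "Once upon a time", "parameters": {"max_new_tokens": 20}})
    print(result["generated_text"])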