Update handler.py
handler.py  CHANGED  (+3, -6)
@@ -10,17 +10,16 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 class EndpointHandler():
     def __init__(self, path=""):
-
+        config = PeftConfig.from_pretrained("JeremyArancio/llm-tolkien")
+        self.model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, load_in_8bit=True, device_map='auto')
         self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
-
-        self.model = PeftModel.from_pretrained(model, path)
+
 
     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
         """
         Args:
             data (Dict): The payload with the text prompt and generation parameters.
         """
-        LOGGER.info(f"Received data: {data}")
         # Get inputs
         prompt = data.pop("inputs", None)
         parameters = data.pop("parameters", None)
@@ -29,12 +28,10 @@
         # Preprocess
         input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(device)
         # Forward
-        LOGGER.info(f"Start generation.")
         if parameters is not None:
             output = self.model.generate(input_ids=input_ids, **parameters)
         else:
             output = self.model.generate(input_ids=input_ids)
         # Postprocess
         prediction = self.tokenizer.decode(output[0])
-        LOGGER.info(f"Generated text: {prediction}")
         return {"generated_text": prediction}
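Both hunks rely on names defined above line 10 of handler.py (the imports, LOGGER, and the device line quoted in the first hunk header). The preamble below is a hedged sketch of what that top of the file plausibly contains; it is inferred from the identifiers the diff uses, not taken verbatim from the commit.

# Sketch of the module preamble the diff assumes (inferred, not part of the commit)
import logging
from typing import Any, Dict

import torch
from peft import PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# LOGGER was used by the log calls removed in this commit
LOGGER = logging.getLogger(__name__)

# Context line quoted in the first hunk header
device = "cuda" if torch.cuda.is_available() else "cpu"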
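As a quick sanity check, the updated handler can be exercised the same way an Inference Endpoint would call it: a dict with an "inputs" prompt and optional "parameters" that are forwarded to model.generate. The snippet below is an illustrative smoke test, not part of the commit; the prompt and generation parameters are made up, and it needs a GPU plus bitsandbytes because the base model is loaded with load_in_8bit=True.

# Illustrative local smoke test for the updated handler (values are made up)
from handler import EndpointHandler

handler = EndpointHandler()  # the updated __init__ appears not to use the `path` argument for loading

payload = {
    "inputs": "In a hole in the ground there lived a hobbit.",
    "parameters": {"max_new_tokens": 50, "do_sample": True, "top_p": 0.9},
}

result = handler(payload)  # __call__ pops "inputs"/"parameters" and runs model.generate
print(result["generated_text"])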
|