from typing import Any, Dict, List

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
)
class EndpointHandler:
    def __init__(self, path: str = ""):
        # Preload everything needed at inference time.
        tokenizer = AutoTokenizer.from_pretrained(path)
        # The tokenizer has no dedicated pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
        self.model = AutoModelForCausalLM.from_pretrained(path)
        self.tokenizer = tokenizer
        # Stop generation once a period is produced (see StopAtPeriodCriteria below).
        self.stopping_criteria = StoppingCriteriaList([StopAtPeriodCriteria(tokenizer)])
    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data: a :obj:`dict` with:
                inputs (:obj:`str`): the prompt to complete.
        Return:
            A :obj:`list` of :obj:`dict` that will be serialized and returned.
        """
        inputs = data.pop("inputs", data)
        # Token-id sequences the model must never emit:
        #   [3070], [10456], [313, 334] are encodings of "(*", so the model never opens a comment;
        #   [13] is a newline character;
        #   [1976, 441, 29889] is "Abort.";
        #   [2087, 29885, 4430, 29889] is "Admitted.".
        bad_words_ids = [[3070], [313, 334], [10456], [13], [1976, 441, 29889], [2087, 29885, 4430, 29889]]
        input_ids = self.tokenizer.encode(inputs, return_tensors="pt")
        # Generate a continuation of at most 50 new tokens.
        generated_ids = self.model.generate(
            input_ids,
            max_new_tokens=50,
            bad_words_ids=bad_words_ids,
            do_sample=True,  # without this, temperature and top_k are ignored
            temperature=1.0,
            top_k=40,
            stopping_criteria=self.stopping_criteria,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        # Keep only the newly generated tokens, dropping the prompt.
        new_token_ids = generated_ids[0][input_ids.shape[1]:]
        generated_text = self.tokenizer.decode(new_token_ids, skip_special_tokens=True)
        return [{"generated_text": generated_text, "generated_ids": new_token_ids.tolist()}]
class StopAtPeriodCriteria(StoppingCriteria):
    """Stops generation as soon as the most recently generated token contains a period."""

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Decode only the last generated token.
        last_token_text = self.tokenizer.decode(input_ids[:, -1], skip_special_tokens=True)
        # Stop once a period appears anywhere in that token.
        return '.' in last_token_text
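
# --- Local usage sketch (illustrative, not part of the handler API) ---
# A minimal smoke test, assuming the model weights live in "./model"
# (a hypothetical path; Inference Endpoints supplies the real path when it
# instantiates EndpointHandler). The tokenizer call also shows how the
# hardcoded bad_words_ids above can be recomputed for this tokenizer.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")
    # Sanity-check one of the banned strings against the hardcoded ids.
    print(handler.tokenizer("(*", add_special_tokens=False).input_ids)
    # An illustrative Coq-style prompt; any string works as "inputs".
    print(handler({"inputs": "Lemma plus_comm : forall n m : nat, n + m = m + n."}))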