import re

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
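
# Custom handler in the style of Hugging Face Inference Endpoints: the
# service instantiates EndpointHandler(path) once at startup, pointing at
# the model repository, and calls it with each deserialized request body.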
|
class EndpointHandler:
    def __init__(self, path=""):
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        # The repository ships a fully pickled model (torch_model.pt) rather
        # than standard transformers weights, so it is loaded directly.
        self.model = torch.load(f"{path}/torch_model.pt")
        with open(f"{path}/default_template.txt", "r") as f:
            self.default_template = f.read()

    def __call__(self, data):
        request_inputs = data.pop("inputs", data)
        template = request_inputs["template"]  # read but never used; the default template is always applied
        messages = request_inputs["messages"]
        char_name = request_inputs["char_name"]
        user_name = request_inputs["user_name"]
        chats_curled = request_inputs["chats_curled"]

        # Render each chat turn as "Name: message", attributing "AI" turns
        # to the character and everything else to the user.
        user_input = [
            "{name}: {message}".format(
                name=char_name if msg["role"] == "AI" else user_name,
                message=msg["message"].strip(),
            )
            for msg in messages
        ]

        # Build the prompt; while it exceeds the 2000-token budget, trim a
        # growing slice of the oldest turns (chats_curled * 2 per pass) and retry.
        while True:
            prompt = self.default_template.format(
                char_name=char_name,
                user_name=user_name,
                user_input="\n".join(user_input),
            )
            encoded_input = self.tokenizer(prompt + f"\n{char_name}:", return_tensors="pt").to("cuda")
            if encoded_input.input_ids.size(1) > 2000:
                chats_curled += 1
                user_input = user_input[chats_curled * 2:]
            else:
                break

        encoded_output = self.model.generate(
            encoded_input["input_ids"],
            max_new_tokens=50,
            do_sample=True,  # temperature/top_p/top_k only take effect when sampling is enabled
            temperature=0.5,
            top_p=0.9,
            top_k=0,
            repetition_penalty=1.1,
            pad_token_id=50256,  # GPT-2-style <|endoftext|> id used for padding
            num_return_sequences=1,
        )

        # Decode, drop the prompt, and keep only the character's reply: the text
        # between "{char_name}:" and the next "{user_name}:" turn, if any.
        decoded_output = self.tokenizer.decode(encoded_output[0], skip_special_tokens=True).replace(prompt, "")
        decoded_output = decoded_output.split(f"{char_name}:", 1)[1].split(f"{user_name}:", 1)[0].strip()

        # Strip *action* asides, falling back to the raw text if nothing remains.
        parsed_result = re.sub(r"\*.*?\*", "", decoded_output).strip()
        if len(parsed_result) != 0:
            decoded_output = parsed_result

        # Remove stray asterisks and collapse runs of whitespace.
        decoded_output = " ".join(decoded_output.replace("*", "").split())

        # Cut the reply at the last sentence-ending punctuation mark, if there is one.
        sentence_ends = [m.start() for m in re.finditer(r"[.!?]", decoded_output)]
        if sentence_ends:
            decoded_output = decoded_output[:sentence_ends[-1] + 1]

        return {
            "role": "AI",
            "message": decoded_output,
            "chats_curled": chats_curled,
        }
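

# Minimal local smoke test: a sketch, assuming a checkout at ./model containing
# torch_model.pt and default_template.txt, plus a CUDA device. The payload
# mirrors the shape __call__ expects; all field values here are illustrative.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")  # hypothetical local path
    response = handler({
        "inputs": {
            "template": "",  # required key, though the handler ignores it
            "messages": [
                {"role": "Human", "message": "Hi! How was your day?"},
                {"role": "AI", "message": "Pretty good, thanks for asking."},
                {"role": "Human", "message": "Tell me something interesting."},
            ],
            "char_name": "Aria",
            "user_name": "Sam",
            "chats_curled": 0,
        }
    })
    print(response)  # {"role": "AI", "message": ..., "chats_curled": ...}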