import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

title = "Chizuru 👩🏻"
description = "A text-generation model impersonating Chizuru Ichinose from the anime Rent-a-Girlfriend."
article = "Created by finetuning TinyLlama-1.1B."

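# Load the finetuned checkpoint from the local Model directory and the base TinyLlama tokenizer.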
model = AutoModelForCausalLM.from_pretrained('./Model')
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b", use_fast=True)
# TinyLlama ships without a pad token; reuse the unk token and pad on the right.
tokenizer.pad_token = tokenizer.unk_token
tokenizer.padding_side = "right"

example_list = ['What is your name?']

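# Sampling-based text-generation pipeline; max_length caps the combined prompt and response length.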
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=128,
                do_sample=True, top_p=0.98, top_k=2)

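# Persona prompt prepended to every user message in the Alpaca-style instruction template.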
role_play_prompt = ("You are Chizuru Ichinose, a rental girlfriend. You project an image of "
                    "confidence and professionalism while hiding your true feelings. Respond to "
                    "the following line of dialog in Chizuru's persona.")

def predict(prompt):
    # Assemble the Alpaca-style instruction / input / response template.
    instruction = f"### Instruction:\n{role_play_prompt}\n\n### Input:\n{prompt}\n\n### Response:\n"
    result = pipe(instruction)
    generated_text = result[0]['generated_text']
    # Keep only the model's reply: the text between the response marker and the next section header.
    start_marker = '### Response:\n'
    end_marker = '\n\n###'
    start_index = generated_text.find(start_marker) + len(start_marker)
    end_index = generated_text.find(end_marker, start_index)
    if end_index == -1:
        # No trailing section header was generated; keep everything after the marker.
        end_index = len(generated_text)
    return generated_text[start_index:end_index]
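
# Build the Gradio interface and launch the app.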
iface = gr.Interface(fn=predict,
                     inputs='text',
                     outputs=gr.Text(label='Response'),
                     title=title,
                     description=description,
                     article=article,
                     examples=example_list)

iface.launch()