# RandomPrompt-v1 / app.py
# Gradio demo for the saltacc/RandomPrompt-v1 causal LM.
# (Origin: Hugging Face Space by saltacc; commit dbede30 "use remote repo".)
import gradio as gr
import torch.cuda
from transformers import AutoModelForCausalLM, AutoTokenizer
# Prefer GPU when one is visible; the model is moved to this device below
# and inputs are moved to the same device inside detect().
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# NOTE(review): downloads weights from the Hugging Face Hub at startup.
model = AutoModelForCausalLM.from_pretrained("saltacc/RandomPrompt-v1")
model.to(device)
# Tokenizer comes from the GPT-Neo 125M base model — presumably the
# checkpoint this model was fine-tuned from; confirm against model card.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
# GPT-Neo defines no dedicated pad token; reuse EOS so padding/decoding work.
tokenizer.pad_token = tokenizer.eos_token
def detect(text_in, max_length):
    """Sample a text continuation from the RandomPrompt-v1 model.

    Args:
        text_in: Seed text from the UI. If empty/falsy, generation is
            seeded with the pad (EOS) token instead.
        max_length: Maximum total token length of the generated sequence.
            Arrives as a float from the Gradio slider, so it is cast to int.

    Returns:
        The decoded generation with pad tokens stripped out.
    """
    # Empty textbox: fall back to the pad token as the generation seed.
    prompt = text_in if text_in else tokenizer.pad_token
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            do_sample=True,
            temperature=0.9,
            max_length=int(max_length),  # generate() expects an int, slider yields float
        )
    text = tokenizer.batch_decode(output_ids)[0]
    # Remove the pad/EOS marker so the UI shows clean text.
    return text.replace(tokenizer.pad_token, '')
# UI: one free-text seed box plus a length slider (100-200 tokens, start 120).
# `default=` was the Gradio 2.x keyword; Gradio 3+ Slider takes `value=`
# (passing `default=` raises a TypeError with top-level components like these).
iface = gr.Interface(
    fn=detect,
    inputs=[gr.Textbox(), gr.Slider(100, 200, value=120)],
    outputs=gr.TextArea(),
)
iface.launch()