# Provenance (from the Hugging Face Space page this file was exported from):
#   Author: Jellyfish042 — "Update app.py", commit 4d3d295, 1.01 kB
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Base mT5 tokenizer; the fine-tuned checkpoint loaded below reuses its vocabulary.
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
# Local fine-tuned seq2seq checkpoint bundled with the Space repo
# (presumably an mt5-small fine-tune, given the tokenizer — TODO confirm).
model = AutoModelForSeq2SeqLM.from_pretrained("./models/checkpoint-15000/")
def text_processing(text):
    """Run the loaded seq2seq model on *text* and return the decoded prediction.

    Args:
        text: Raw input string to translate/transform with the model.

    Returns:
        The generated output for the single input, decoded to a string
        with special tokens stripped.
    """
    # The original referenced an undefined global `device`, which raised a
    # NameError on every call. Use the device the model actually lives on,
    # so inputs and weights are guaranteed to match.
    device = model.device
    # Tokenize ONCE and reuse the encoding (the original tokenized the same
    # input twice — once for input_ids, once for attention_mask).
    encoded = tokenizer(
        [text],
        return_tensors="pt",
        max_length=512,
        truncation=True,
        padding="max_length",
    )
    input_ids = encoded.input_ids.to(device)
    attention_mask = encoded.attention_mask.to(device)
    # Generate the prediction; cap new tokens to match the input budget.
    output = model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        max_new_tokens=512,
    )
    # Decode each sequence in the (single-element) batch; skip pad/eos markers.
    decoded_output = [tokenizer.decode(ids, skip_special_tokens=True) for ids in output]
    return decoded_output[0]
# Minimal Gradio UI: a single text box in, a single text box out,
# wired straight to the inference function.
iface = gr.Interface(
    fn=text_processing,
    inputs="text",
    outputs=["text"],
    title="test",
    description="test space",
)
iface.launch(inline=False)