import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the model and tokenizer once at import time so they are not
# re-downloaded and re-initialized on every translation request.
model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def translate(text):
    # Tokenize the Spanish input and generate the Quechua translation with
    # beam search. ("model_inputs" avoids shadowing the built-in "input".)
    model_inputs = tokenizer(text, return_tensors="pt")
    output = model.generate(model_inputs["input_ids"], max_length=40, num_beams=4, early_stopping=True)
    return tokenizer.decode(output[0], skip_special_tokens=True)
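

# A hedged sketch, not part of the original app: translating several sentences
# in one batch, assuming the T5 tokenizer's padding support. All calls used
# here (padding=True, generate with attention_mask) are standard transformers
# APIs; the helper name translate_batch is hypothetical.
def translate_batch(texts):
    batch = tokenizer(texts, return_tensors="pt", padding=True)
    outputs = model.generate(
        batch["input_ids"],
        attention_mask=batch["attention_mask"],
        max_length=40,
        num_beams=4,
        early_stopping=True,
    )
    return [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]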


# UI components (uses the legacy gr.inputs / gr.outputs namespace from Gradio 2.x).
title = "Spanish to Quechua translation 🦙"
inputs = gr.inputs.Textbox(lines=1, label="Text in Spanish")
outputs = [gr.outputs.Textbox(label="Translated text in Quechua")]

description = "This demo uses the [t5-small-finetuned-spanish-to-quechua](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) model, which was trained on the [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
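
# For reference (a sketch, not executed by the app): the training corpus can
# be inspected with the `datasets` library, assuming it is still publicly
# hosted at the URL above:
#   from datasets import load_dataset
#   corpus = load_dataset('hackathon-pln-es/spanish-to-quechua')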

article = '''
## Challenges
- Creating the dataset, since Quechua has several distinct variants.
- Training the model to get the best results with the least amount of computational resources.

## Team members
- [Sara Benel](https://huggingface.co/sbenel)
- [Jose Vílchez](https://huggingface.co/JCarlos)
'''

examples = [
    'Dios ama a los hombres',
    'A pesar de todo, soy feliz',
    '¿Qué harán allí?',
    'Debes aprender a respetar',
]

iface = gr.Interface(
    fn=translate,
    inputs=inputs,
    outputs=outputs,
    theme="grass",
    css="styles.css",
    examples=examples,
    title=title,
    description=description,
    article=article,
)
iface.launch()
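
# Optional (hypothetical usage, not in the original app): when running locally
# rather than on Hugging Face Spaces, share=True would expose a temporary
# public URL:
# iface.launch(share=True)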