import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the model and tokenizer once at startup rather than on every request.
model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def translate(text):
    # Tokenize the Spanish input and translate with beam search
    # (4 beams, up to 40 generated tokens).
    encoded = tokenizer(text, return_tensors="pt")
    output = model.generate(encoded["input_ids"], max_length=40, num_beams=4, early_stopping=True)
    return tokenizer.decode(output[0], skip_special_tokens=True)
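# A quick sanity check outside the UI (a sketch; the exact Quechua output
# depends on the model weights):
#   >>> translate('Dios ama a los hombres')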
  
title = "Spanish to Quechua translation 🦙"
inputs = gr.Textbox(lines=1, label="Text in Spanish")
outputs = gr.Textbox(label="Translated text in Quechua")

description = "This demo uses the [t5-small-finetuned-spanish-to-quechua](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) model, which was trained on the [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
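# The same checkpoint can also be loaded directly with the transformers
# text2text-generation pipeline (a minimal sketch, independent of Gradio):
#   from transformers import pipeline
#   quechua = pipeline('text2text-generation', model=model_name)
#   quechua('Dios ama a los hombres')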

article = '''
## Challenges
- Creating a dataset, since there are several variants of Quechua.
- Training the model to optimize results while using as few computational resources as possible.

## Team members
- [Sara Benel](https://huggingface.co/sbenel)
- [Jose Vílchez](https://huggingface.co/JCarlos)
'''

examples = [
    'Dios ama a los hombres',
    'A pesar de todo, soy feliz',
    '¿Qué harán allí?',
    'Debes aprender a respetar',
]

iface = gr.Interface(fn=translate, inputs=inputs, outputs=outputs, theme="grass", css="styles.css", examples=examples, title=title, description=description, article=article)
# Queue incoming requests so concurrent translations are handled in order.
iface.queue().launch()