import gradio as gr
from transformers import pipeline
import spaces
import torch
# Check if CUDA is available and set device accordingly
device = 0 if torch.cuda.is_available() else -1
# Initialize the translation pipeline with the device argument
translator = pipeline("translation", model="facebook/m2m100_418M", device=device)
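# Language codes exposed in the dropdowns (M2M100 codes): Hindi, Chinese, French, English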
LANGS = ["hi", "zh", "fr", "en"]
@spaces.GPU  # request a GPU for the duration of each call on ZeroGPU Spaces
def translate(text, src_lang, tgt_lang):
    """Translate the text from the source language to the target language."""
    translation = translator(text, src_lang=src_lang, tgt_lang=tgt_lang)
    return translation[0]['translation_text']
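
# Illustrative usage (output is model-dependent and may vary):
#   translate("Hello, world!", "en", "fr")  ->  e.g. "Bonjour le monde"
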
demo = gr.Interface(
    fn=translate,
    inputs=[
        gr.Textbox(label="Text"),
        gr.Dropdown(label="Source Language", choices=LANGS),
        gr.Dropdown(label="Target Language", choices=LANGS),
    ],
    outputs=["text"],
    examples=[["Building a translation demo with Gradio is so easy!", "en", "fr"]],
    cache_examples=False,
    title="Translation Demo",
    description="This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space.",
)
demo.queue(default_concurrency_limit=8)
demo.launch()