"""Gradio demo translating between Kinyarwanda and English with NLLB checkpoints."""
import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

LANGS = ["kin_Latn", "eng_Latn"]
TASK = "translation"
MODELS = ["facebook/nllb-200-distilled-600M", "DigitalUmuganda/Finetuned-NLLB"]

# transformers pipelines expect a CUDA device index, or -1 for CPU.
device = 0 if torch.cuda.is_available() else -1

# Load both checkpoints once at startup so requests never re-download weights.
models = {name: AutoModelForSeq2SeqLM.from_pretrained(name) for name in MODELS}

# The finetuned checkpoint is derived from the base NLLB model, so a single
# base tokenizer is shared by both models — TODO confirm vocab is unchanged
# in DigitalUmuganda/Finetuned-NLLB.
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

# Cache one pipeline per configuration; building a pipeline is expensive and
# the original code rebuilt it on every request.
_pipelines = {}


def translate(CKPT, text, src_lang, tgt_lang, max_length=400):
    """Translate `text` from `src_lang` to `tgt_lang` using the chosen model.

    Args:
        CKPT: model identifier; must be one of MODELS.
        text: source-language text to translate.
        src_lang: NLLB source language code (e.g. "kin_Latn").
        tgt_lang: NLLB target language code (e.g. "eng_Latn").
        max_length: generation length cap passed to the pipeline.

    Returns:
        The translated text as a plain string.
    """
    key = (CKPT, src_lang, tgt_lang, max_length)
    if key not in _pipelines:
        _pipelines[key] = pipeline(
            TASK,
            model=models[CKPT],
            tokenizer=tokenizer,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            max_length=max_length,
            device=device,
        )
    result = _pipelines[key](text)
    return result[0]["translation_text"]


demo = gr.Interface(
    translate,
    [
        gr.components.Dropdown(label="choose a model", choices=MODELS),
        gr.components.Textbox(label="Text"),
        gr.components.Dropdown(label="Source Language", choices=LANGS),
        gr.components.Dropdown(label="Target Language", choices=LANGS),
    ],
    ["text"],
    cache_examples=False,
    title="Finetuned-NLLB-1",
)

if __name__ == "__main__":
    demo.launch()