import os

import gradio as grad
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TextStreamer

# Hugging Face access token (needed if the model repo is private), read from the environment.
auth_token = os.environ.get("auth_token")

# Arabic -> English translation checkpoint.
model_c = "nadsoft/faseeh.v.9"
tokenizer = AutoTokenizer.from_pretrained(model_c, src_lang="ar_AR", tgt_lang="en_XX", use_auth_token=auth_token)
model = AutoModelForSeq2SeqLM.from_pretrained(model_c, use_auth_token=auth_token)

# Streams generated tokens to stdout as they are produced.
streamer = TextStreamer(tokenizer)


def translate(text):
    """Translate Arabic input text to English."""
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=128, num_beams=1, streamer=streamer)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


grad.Interface(translate, inputs=["text"], outputs=["text"]).launch()