"""Gradio demo: translate text with Facebook's M2M100 multilingual model.

Launches a simple text-in / text-out web UI that translates Russian to
English by default via `greet`, while `m2m_translator` supports any
language pair M2M100 knows.
"""

import gradio as gr
import torch
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Loaded once at import time; the 418M checkpoint is downloaded on first run.
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")


def m2m_translator(src_lang, dest_lang, text):
    """Translate *text* from *src_lang* to *dest_lang* using M2M100.

    Args:
        src_lang: source language code understood by M2M100 (e.g. "ru").
        dest_lang: target language code (e.g. "en").
        text: the text to translate.

    Returns:
        The translated text as a single string (decoded batch entries
        joined with spaces).
    """
    tokenizer.src_lang = src_lang
    encoded = tokenizer(text, return_tensors="pt")
    # Inference only: disable autograd tracking to save memory and time.
    with torch.no_grad():
        generated_tokens = model.generate(
            **encoded,
            # Force the decoder to start in the target language.
            forced_bos_token_id=tokenizer.get_lang_id(dest_lang),
        )
    response = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    return " ".join(response)


def greet(text):
    """Gradio callback: translate Russian input to English."""
    return m2m_translator("ru", "en", text)


# Text-in / text-out UI; launching at module level starts the server
# when this file is executed (the original's behavior is preserved).
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()