import os

import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Fail fast if the Hugging Face auth token is not configured — the model repo
# requires it for download.
hf_token = os.getenv('HF_TOKEN')
if not hf_token:
    raise EnvironmentError("Hugging Face token not found. Set it in the environment variable 'HF_TOKEN'.")

# Fine-tuned Tounsi (Arabizi) -> English seq2seq model on the Hugging Face Hub.
model_name = "IbrahimAL24/TNtoEng-By-Ibrahim-V1"


def load_model_and_tokenizer(model_name, token):
    """Load the seq2seq model and its tokenizer from the Hugging Face Hub.

    Args:
        model_name: Hub repository id of the model to load.
        token: Hugging Face access token used for authenticated download.

    Returns:
        A ``(model, tokenizer)`` tuple.

    Raises:
        RuntimeError: If either the model or the tokenizer fails to load;
            the original exception is preserved as the cause.
    """
    try:
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name, token=token)
        tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
        return model, tokenizer
    except Exception as e:
        # Chain the cause so the underlying HF/network error stays visible.
        raise RuntimeError(f"Error loading model or tokenizer: {e}") from e


# Loaded once at startup and shared by every request.
model, tokenizer = load_model_and_tokenizer(model_name, hf_token)


def translate(text):
    """Translate Tounsi (Arabizi) *text* to English using the loaded model.

    Args:
        text: Input text in Tounsi (Arabizi).

    Returns:
        The English translation as a string; empty string for empty/blank input.
    """
    # Guard against empty or whitespace-only input from the UI.
    if not text or not text.strip():
        return ""
    # truncation=True bounds over-long inputs at the tokenizer's model_max_length
    # instead of letting generate() fail on an oversized sequence.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return result


# Define the Gradio interface
iface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(label="Input Text", lines=5, placeholder="Enter text here..."),
    outputs=gr.Textbox(label="Output Text", lines=5, placeholder="Translated text will appear here..."),
    title="Translate Tounsi (Arabizi) to English By Ibrahim",
    description="Translate Tounsi (Arabizi) text to English using a fine-tuned model."
)

# Launch only when executed as a script, so importing this module
# (e.g. for testing) does not start the server.
if __name__ == "__main__":
    iface.launch()