import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Specify your finetuned model name here
model_name = "BotCuddles/to_deploy"

# Load your finetuned model and tokenizer from the Hugging Face Hub
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Error loading model and tokenizer: {e}")
    generator = None


def generate_text(prompt):
    if generator:
        try:
            full_prompt = (
                "Understand the following message from user and give a "
                "compassionate reply. Message: " + prompt
            )
            # Use max_new_tokens rather than max_length: max_length counts
            # the prompt tokens too, so a longer message would leave little
            # or no room for the reply. return_full_text=False strips the
            # echoed prompt so the user sees only the generated reply.
            result = generator(
                full_prompt,
                max_new_tokens=50,
                num_return_sequences=1,
                return_full_text=False,
            )
            return result[0]["generated_text"].strip()
        except Exception as e:
            return f"Error generating text: {e}"
    else:
        return "Model not loaded properly."


iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Demo - Lift Me Up",
    description="Generate text using Lift-Me Bot.",
)

if __name__ == "__main__":
    iface.launch()
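
# --- Optional: programmatic smoke test ---
# A minimal sketch, assuming the app above is already running locally on
# Gradio's default port (7860) and that the gradio_client package is
# installed (pip install gradio_client). The "/predict" endpoint is the
# default api_name Gradio assigns to a single-function Interface. Run this
# from a separate process, not inside this script:
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     reply = client.predict("I had a rough day at work.", api_name="/predict")
#     print(reply)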