"""Gradio chat UI around a Hugging Face causal-LM fine-tuned for therapy-style replies."""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Single source of truth for the model path — the tokenizer must come from the
# same checkpoint as the model, so reuse this constant for both loads.
MODEL_NAME = "llmModeluser/therapy_trained_model"

# Load the pre-trained model and its matching tokenizer from the Hugging Face Hub.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Create the text-generation pipeline once at module load so each request
# reuses the already-initialized model.
nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chat_response(user_input: str) -> str:
    """Generate a single sampled reply to *user_input* using the fine-tuned model.

    NOTE: ``max_length`` counts prompt + generated tokens, so very long inputs
    leave little room for the reply — TODO consider ``max_new_tokens`` instead.
    """
    result = nlp(
        user_input,
        max_length=100,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
    )
    return result[0]["generated_text"]


iface = gr.Interface(
    fn=chat_response,
    inputs=gr.Textbox(lines=1, placeholder="Enter your message here..."),
    outputs="text",
    title="Therapy Chatbot",
)

# Guard the server launch so importing this module (e.g. for testing or reuse
# of chat_response) does not start a web server as a side effect.
if __name__ == "__main__":
    iface.launch(server_port=7861)