import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the InferenceClient with your model from Hugging Face
client = InferenceClient(model="pro-grammer/MindfulAI")
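# If the model repo is gated or rate-limited, an access token can also be passed
# (a sketch; assumes an HF_TOKEN secret is configured for this Space):
#   import os
#   client = InferenceClient(model="pro-grammer/MindfulAI", token=os.environ.get("HF_TOKEN"))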
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Build a prompt string manually
    prompt = system_message + "\n"
    for user_msg, assistant_msg in history:
        prompt += f"Human: {user_msg}\nAssistant: {assistant_msg}\n"
    prompt += f"Human: {message}\nAssistant:"
    response = ""
    # Use text_generation instead of chat_completion; with stream=True (and
    # details=False) each item yielded is a plain string chunk of generated text
    for token in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += token
        yield response
if "Human" in response:
location = response.find("Human")
response = response[0:location]
if "Me" in response:
location = response.find("Me")
response = response[0:location]
if "You" in response:
location = response.find("You")
response = response[0:location]
    # Print the disclaimer to the server log at the end of each response
    print("""IMPORTANT: I am an AI project created to demonstrate therapeutic conversation patterns and am not a licensed mental health professional. If you're struggling with any emotional, mental health, or personal challenges, please seek help from a qualified therapist. You can find licensed therapists at BetterHelp.com.
Remember, there's no substitute for professional mental healthcare. This is just a demonstration project.""")
demo = gr.ChatInterface(
    fn=respond,
    title="MindfulAI Chat",
    description="Chat with MindfulAI – your AI Therapist powered by the pro-grammer/MindfulAI model.",
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
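# Quick manual check outside the Gradio UI (a sketch; assumes the hosted model
# is reachable through the Inference API):
#   chunks = respond("Hello", [], "You are a friendly Chatbot.", 128, 0.7, 0.95)
#   print(list(chunks)[-1])  # the last yield is the stop-word-trimmed response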