# TherapyTone — app.py
# Hugging Face Space by ageraustine (commit fc3bcbd, "rm looping").
# Mood questionnaire -> LangChain prompt -> MusicGen therapeutic audio.
import gradio as gr
import requests
import tempfile
import os
from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnablePassthrough
from langchain.prompts import PromptTemplate
# Mood assessment questions, shown in the UI in this exact order; the first
# three are answered with 1-5 sliders, the last two with free text.
# analyze_mood_and_generate_prompt() relies on this ordering.
MOOD_QUESTIONS = [
    "On a scale of 1-5, how would you rate your current energy level?",
    "On a scale of 1-5, how would you rate your current stress level?",
    "On a scale of 1-5, how happy do you feel right now?",
    "What emotions are you experiencing the most right now? (e.g., joy, anxiety, sadness)",
    "What type of mood would you like to achieve?"
]
# Initialize LangChain components.
# NOTE(review): ChatOpenAI is constructed at import time and reads the
# OPENAI_API_KEY from the environment — confirm the key is set in the Space.
llm = ChatOpenAI(model="gpt-4o-mini")
# Prompt template asking the LLM to turn the questionnaire answers (plus
# optional musical preferences) into a short MusicGen text prompt.
music_prompt_template = """
Based on the user's mood assessment:
- Energy level: {energy}
- Stress level: {stress}
- Happiness level: {happiness}
- Current emotions: {current_emotions}
- Desired mood: {desired_mood}
Musical preferences (if specified):
- Preferred genre: {genre}
- Preferred instruments: {instruments}
- Preferred tempo: {tempo}
- Preferred mood: {preferred_mood}
Generate a detailed music prompt that would help the user transition from their current emotional state to their desired mood.
The prompt should incorporate the user's musical preferences if provided, while focusing on therapeutic musical qualities.
Keep the description under 100 words and emphasize the healing aspects of the music.
"""
prompt = PromptTemplate(
    input_variables=["energy", "stress", "happiness", "current_emotions", "desired_mood",
                     "genre", "instruments", "tempo", "preferred_mood"],
    template=music_prompt_template
)
# Runnable pipeline: passthrough dict -> formatted prompt -> chat model.
music_chain = RunnablePassthrough() | prompt | llm
def analyze_mood_and_generate_prompt(responses, preferences):
    """Turn questionnaire answers into a music-generation prompt via LangChain.

    Args:
        responses: the five answers, in MOOD_QUESTIONS order
            (energy, stress, happiness, current emotions, desired mood).
        preferences: dict with "genre", "instruments", "tempo" and
            "preferred_mood" keys; falsy values fall back to "any".

    Returns:
        The LLM-generated prompt text, or an "Error generating prompt: ..."
        string if the chain invocation fails.
    """
    mood_keys = ("energy", "stress", "happiness", "current_emotions", "desired_mood")
    pref_keys = ("genre", "instruments", "tempo", "preferred_mood")
    try:
        chain_input = dict(zip(mood_keys, responses))
        # Empty/None preferences are normalized to "any" for the template.
        chain_input.update({k: preferences[k] or "any" for k in pref_keys})
        return music_chain.invoke(chain_input).content
    except Exception as e:
        return f"Error generating prompt: {str(e)}"
def generate_music(prompt, duration=10):
    """Generate music from a text prompt via the Hugging Face MusicGen API.

    Args:
        prompt: text description of the desired music.
        duration: requested clip length in seconds.
            NOTE(review): this parameter is currently NOT forwarded to the
            API — the payload only carries "inputs" — so the model returns
            its default-length clip. Confirm whether the inference endpoint
            accepts a duration/generation parameter before wiring it in.

    Returns:
        (wav_file_path, success_message) on success, or
        (None, error_message) on any API or file-system failure.
    """
    API_URL = "https://api-inference.huggingface.co/models/facebook/musicgen-small"
    # Auth token comes from the HF_API_KEY env var (set as a Space secret).
    headers = {"Authorization": f"Bearer {os.getenv('HF_API_KEY')}"}
    payload = {
        "inputs": prompt
    }
    try:
        # Long timeout: the model may cold-start on the inference API.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=300)
        if response.status_code != 200:
            return None, f"Error: API returned status code {response.status_code}"
        # Persist the returned audio bytes; delete=False so Gradio can
        # serve the file after this function returns.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
            tmp_file.write(response.content)
            tmp_file_path = tmp_file.name
        return tmp_file_path, "Music generated successfully!"
    # Narrowed from bare Exception: only network errors and file-write
    # errors are expected here; programming errors should surface.
    except (requests.RequestException, OSError) as e:
        return None, f"Error: {str(e)}"
def gradio_interface(energy, stress, happiness, current_emotions, desired_mood,
                     genre, instruments, tempo, preferred_mood):
    """Run the full pipeline: questionnaire answers -> prompt -> audio.

    Returns (audio_path, music_prompt, status_message) for the three output
    widgets; raises gr.Error with the failure message if generation fails.
    """
    # Bundle the questionnaire answers in MOOD_QUESTIONS order.
    mood_answers = [energy, stress, happiness, current_emotions, desired_mood]
    musical_prefs = {
        "genre": genre,
        "instruments": instruments,
        "tempo": tempo,
        "preferred_mood": preferred_mood,
    }
    # Step 1: LLM turns the assessment into a MusicGen text prompt.
    music_prompt = analyze_mood_and_generate_prompt(mood_answers, musical_prefs)
    # Step 2: MusicGen renders the prompt to audio.
    audio_path, message = generate_music(music_prompt)
    if audio_path is None:
        # Surface the failure in the Gradio UI instead of returning outputs.
        raise gr.Error(message)
    return audio_path, music_prompt, message
# Create the Gradio app: a two-column layout — questionnaire and preference
# inputs on the left, generated prompt/audio/status outputs on the right.
with gr.Blocks() as demo:
    gr.Markdown("""
    # Therapeutic Music Generator
    Complete the mood assessment questionnaire and optionally specify your musical preferences to receive personalized music
    that helps you achieve your desired emotional state.
    """)
    with gr.Row():
        with gr.Column():
            # Questionnaire inputs: three 1-5 sliders (default 3) labeled
            # with the corresponding MOOD_QUESTIONS entries.
            energy_input = gr.Slider(
                minimum=1,
                maximum=5,
                value=3,
                step=1,
                label=MOOD_QUESTIONS[0]
            )
            stress_input = gr.Slider(
                minimum=1,
                maximum=5,
                value=3,
                step=1,
                label=MOOD_QUESTIONS[1]
            )
            happiness_input = gr.Slider(
                minimum=1,
                maximum=5,
                value=3,
                step=1,
                label=MOOD_QUESTIONS[2]
            )
            # Free-text answers for the last two questions.
            emotions_input = gr.Textbox(
                label=MOOD_QUESTIONS[3],
                placeholder="e.g., anxiety, excitement, sadness"
            )
            desired_mood_input = gr.Textbox(
                label=MOOD_QUESTIONS[4],
                placeholder="e.g., calm, energized, focused"
            )
            # Optional musical-preference fields; empty values are treated
            # as "any" by analyze_mood_and_generate_prompt.
            gr.Markdown("### Musical Preferences (Optional)")
            genre_input = gr.Textbox(
                label="Preferred Genre",
                placeholder="e.g., classical, jazz, ambient"
            )
            instruments_input = gr.Textbox(
                label="Preferred Instruments",
                placeholder="e.g., piano, guitar, strings"
            )
            tempo_input = gr.Textbox(
                label="Preferred Tempo",
                placeholder="e.g., slow, moderate, fast"
            )
            mood_input = gr.Textbox(
                label="Preferred Musical Mood",
                placeholder="e.g., melancholic, uplifting, peaceful"
            )
            generate_button = gr.Button("Generate Therapeutic Music")
        with gr.Column():
            # Output widgets filled by gradio_interface.
            prompt_output = gr.Textbox(label="Generated Music Prompt")
            audio_output = gr.Audio(label="Generated Music")
            message_output = gr.Textbox(label="Status")
    # Wire the button to the pipeline. Input order must match the
    # gradio_interface parameter order; output order matches its return tuple
    # (audio path, prompt text, status message).
    generate_button.click(
        fn=gradio_interface,
        inputs=[
            energy_input,
            stress_input,
            happiness_input,
            emotions_input,
            desired_mood_input,
            genre_input,
            instruments_input,
            tempo_input,
            mood_input
        ],
        outputs=[
            audio_output,
            prompt_output,
            message_output
        ]
    )
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()