import streamlit as st
from transformers import pipeline

# Load the audio classification model fine-tuned for stutter detection
audio_classifier = pipeline(
    "audio-classification",
    model="HareemFatima/distilhubert-finetuned-stutterdetection",
)

# Placeholder text-to-speech function (replace with your actual TTS implementation)
def tts(text):
    # Placeholder: return a descriptive string instead of synthesized audio
    return f"Synthesized speech for therapy: {text}"

# Therapy text for different stutter types (replace with your specific content)
therapy_text = {
    "Normal Speech": "Your speech sounds great! Keep practicing!",
    "Blocking": "Take a deep breath and try speaking slowly. You can do it!",
    "Prolongation": "Focus on relaxing your mouth muscles and speaking smoothly.",
    # Add more stutter types and therapy text here
}

st.title("Stuttering Therapy Assistant")
st.write("This app helps you identify stuttering types and provides personalized therapy suggestions.")
uploaded_audio = st.file_uploader("Upload Audio Clip")

if uploaded_audio is not None:
    # Read raw audio bytes from the uploaded file
    audio_bytes = uploaded_audio.read()

    # Classify the stuttering type
    prediction = audio_classifier(audio_bytes)
    stutter_type = prediction[0]["label"]

    # Retrieve the matching therapy text (fall back to a general tip)
    therapy = therapy_text.get(stutter_type, "General therapy tip: Practice slow, relaxed speech.")

    # Generate synthesized speech (placeholder text for now)
    synthesized_speech = tts(therapy)

    st.write(f"Predicted Stutter Type: {stutter_type}")
    st.write(f"Therapy Tip: {therapy}")
    # The placeholder tts() returns plain text, so display it rather than
    # passing a string to st.audio(); switch to audio playback once a real
    # TTS model is plugged in.
    st.write(synthesized_speech)
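    # If the sketched tts() above were used instead (returning a waveform and
    # its sampling rate), playback could look like:
    #
    #     audio_array, sampling_rate = tts(therapy)
    #     st.audio(audio_array, sample_rate=sampling_rate)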