import streamlit as st
import os
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from streamlit_mic_recorder import speech_to_text
from model_pipeline import ModelPipeLine
from q_learning_chatbot import QLearningChatbot
from gtts import gTTS
from io import BytesIO
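# ModelPipeLine is a local project module: create_final_chain() builds the
# conversational RAG chain that generate_response() below queries via
# call_conversational_rag().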
mdl = ModelPipeLine()
final_chain = mdl.create_final_chain()
st.set_page_config(page_title="PeacePal")
# Define states and actions
states = [
    "Negative",
    "Moderately Negative",
    "Neutral",
    "Moderately Positive",
    "Positive",
]
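# Note: `states` enumerates the sentiment levels, presumably for the
# QLearningChatbot imported above; it is not referenced again in this script.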
# Add logo to the sidebar
#logo_path = os.path.join('images', 'logo.jpeg')
#st.sidebar.image(logo_path, use_column_width=True)
# Add image to the sidebar
image_path = os.path.join('images', 'sidebar.jpg')
st.sidebar.image(image_path, use_column_width=True)
st.title('PeacePal 🌱')
## generated stores AI-generated responses
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["I'm your mental health assistant. How may I help you?"]

## past stores the user's questions
if 'past' not in st.session_state:
    st.session_state['past'] = ['Hi!']
# Layout of input/response containers
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
input_container = st.container()
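# Streamlit renders containers in creation order, so the chat history written
# into response_container appears above the widgets in input_container, even
# though input_container is populated first further down.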
# User input
## Function for taking a user-provided prompt as input
def get_text():
    input_text = st.text_input("You: ", "", key="input")
    return input_text
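# Note: get_text() is not called anywhere below; the Text branch inside
# input_container builds its own st.text_input (also with key="input") instead.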
def generate_response(prompt):
    response = mdl.call_conversational_rag(prompt, final_chain)
    return response['answer']
def text_to_speech(text):
    # Use gTTS to convert text to speech
    tts = gTTS(text=text, lang='en')
    # Save the speech as bytes in memory
    fp = BytesIO()
    tts.write_to_fp(fp)
    # Rewind the buffer so the audio player reads the clip from the start
    fp.seek(0)
    return fp
def speech_recognition_callback():
    # Ensure that speech output is available
    if st.session_state.my_stt_output is None:
        st.session_state.p01_error_message = "Please record your response again."
        return
    # Clear any previous error messages
    st.session_state.p01_error_message = None
    # Store the speech output in the session state
    st.session_state.speech_input = st.session_state.my_stt_output
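# streamlit_mic_recorder stores the recognized text in session state under
# '<key>_output' (here 'my_stt_output'), which is what the callback above reads.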
## Applying the user input box
with input_container:
    # Add a radio button to choose input mode
    input_mode = st.radio("Select input mode:", ["Text", "Speech"])

    if input_mode == "Speech":
        # Use the speech_to_text function to capture speech input
        speech_input = speech_to_text(
            key='my_stt',
            callback=speech_recognition_callback
        )
        # Check if speech input is available
        if 'speech_input' in st.session_state and st.session_state.speech_input:
            # Display the speech input
            st.text(f"Speech Input: {st.session_state.speech_input}")

            # Process the speech input as a query
            query = st.session_state.speech_input
            with st.spinner("processing....."):
                response = generate_response(query)
                st.session_state.past.append(query)
                st.session_state.generated.append(response)

                # Convert the response to speech
                speech_fp = text_to_speech(response)
                # Play the speech
                st.audio(speech_fp, format='audio/mp3')
    else:
        # Add a text input field for the query
        query = st.text_input("Query: ", key="input")

        # Process the query if it's not empty
        if query:
            with st.spinner("typing....."):
                response = generate_response(query)
                st.session_state.past.append(query)
                st.session_state.generated.append(response)

                # Convert the response to speech
                speech_fp = text_to_speech(response)
                # Play the speech
                st.audio(speech_fp, format='audio/mp3')
## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
    if st.session_state['generated']:
        for i in range(len(st.session_state['generated'])):
            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
            message(st.session_state["generated"][i], key=str(i))