import streamlit as st
import os
import pandas as pd
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from streamlit_mic_recorder import speech_to_text
from model_pipeline import ModelPipeLine
from q_learning_chatbot import QLearningChatbot

from gtts import gTTS
from io import BytesIO
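
# PeacePal: a Streamlit chat assistant that answers queries through a
# conversational RAG pipeline (ModelPipeLine) while a Q-learning agent
# (QLearningChatbot) tracks user sentiment and mood trends. Input can be
# typed or spoken, and replies are read back with gTTS.
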
st.set_page_config(page_title="PeacePal")

# Display the sidebar image
image_path = os.path.join('images', 'sidebar.jpg')
st.sidebar.image(image_path, use_column_width=True)

st.title('PeacePal 🌱')

mdl = ModelPipeLine()
# Access the retriever attribute of the ModelPipeLine instance
retriever = mdl.retriever

final_chain = mdl.create_final_chain()

# Define the sentiment states used by the Q-learning agent
states = [
    "Negative",
    "Moderately Negative",
    "Neutral",
    "Moderately Positive",
    "Positive",
]

# Initialize the Q-learning chatbot
chatbot = QLearningChatbot(states)

# Helper to render the Q-table as a DataFrame
# (only the state labels are tabulated; q_values is accepted but not yet shown)
def display_q_table(q_values, states):
    q_table_dict = {"State": states}
    q_table_df = pd.DataFrame(q_table_dict)
    return q_table_df
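
# display_q_table is not wired into the UI below; a hypothetical hook-up
# (assuming the agent exposes its table as `chatbot.q_values`) could be:
#   st.sidebar.dataframe(display_q_table(chatbot.q_values, states))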

def text_to_speech(text):
    # Use gTTS to convert text to speech
    tts = gTTS(text=text, lang="en")
    # Save the speech as bytes in memory
    fp = BytesIO()
    tts.write_to_fp(fp)
    # Rewind so st.audio reads from the start of the buffer
    fp.seek(0)
    return fp


def speech_recognition_callback():
    # Ensure that speech output is available
    if st.session_state.my_stt_output is None:
        st.session_state.p01_error_message = "Please record your response again."
        return

    # Clear any previous error messages
    st.session_state.p01_error_message = None

    # Store the speech output in the session state
    st.session_state.speech_input = st.session_state.my_stt_output 

## 'generated' stores the AI-generated responses
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["I'm your mental health assistant. How may I help you?"]
## 'past' stores the user's messages
if 'past' not in st.session_state:
    st.session_state['past'] = ['Hi!']

# Initialize memory
if "entered_text" not in st.session_state:
    st.session_state.entered_text = []
if "entered_mood" not in st.session_state:
    st.session_state.entered_mood = []
if "messages" not in st.session_state:
    st.session_state.messages = []
if "user_sentiment" not in st.session_state:
    st.session_state.user_sentiment = "Neutral"
if "mood_trend" not in st.session_state:
    st.session_state.mood_trend = "Unchanged"
if "mood_trend_symbol" not in st.session_state:
    st.session_state.mood_trend_symbol = ""
if "show_question" not in st.session_state:
    st.session_state.show_question = False
if "asked_questions" not in st.session_state:
    st.session_state.asked_questions = []

# Layout of input/response containers

colored_header(label='', description='', color_name='blue-30')
response_container = st.container()
input_container = st.container()

# User input
## Function for taking user provided prompt as input
def get_text():
    input_text = st.text_input("You: ", "", key="input")
    return input_text

def generate_response(prompt):
    response = mdl.call_conversational_rag(prompt,final_chain)
    return response['answer']
        
     
## Applying the user input box        
with input_container:
    # Add a radio button to choose input mode
    input_mode = st.radio("Select input mode:", ["Text", "Speech"])

    if input_mode == "Speech":
        # Use the speech_to_text function to capture speech input
        speech_input = speech_to_text(
            key='my_stt', 
            callback=speech_recognition_callback
        )

        # Check if speech input is available
        if 'speech_input' in st.session_state and st.session_state.speech_input:
            # Display the speech input
            st.text(f"Speech Input: {st.session_state.speech_input}")
            
            # Process the speech input as a query
            query = st.session_state.speech_input
            with st.spinner("processing....."):
                response = generate_response(query)
                st.session_state.past.append(query)
                st.session_state.generated.append(response)
                # Detect sentiment
                user_sentiment = chatbot.detect_sentiment(query)
    
    
                
                # Update mood history / mood_trend
                chatbot.update_mood_history()
                mood_trend = chatbot.check_mood_trend()
    
                # Define rewards
                if user_sentiment in ["Positive", "Moderately Positive"]:
                    if mood_trend == "increased":
                        reward = +1
                        mood_trend_symbol = " ⬆️"
                    elif mood_trend == "unchanged":
                        reward = +0.8
                        mood_trend_symbol = ""
                    else:  # decreased
                        reward = -0.2
                        mood_trend_symbol = " ⬇️"
                else:
                    if mood_trend == "increased":
                        reward = +1
                        mood_trend_symbol = " ⬆️"
                    elif mood_trend == "unchanged":
                        reward = -0.2
                        mood_trend_symbol = ""
                    else:  # decreased
                        reward = -1
                        mood_trend_symbol = " ⬇️"
    
                print(
                    f"mood_trend - sentiment - reward: {mood_trend} - {user_sentiment} - 🛑{reward}🛑"
                )
    
                # Update Q-values
                chatbot.update_q_values(
                    user_sentiment, reward, user_sentiment
                )

                # Convert the response to speech
                speech_fp = text_to_speech(response)
                # Play the speech
                st.audio(speech_fp, format='audio/mp3')

    else:
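        # This text-input branch mirrors the speech flow above: generate a
        # response, score sentiment, update the mood trend and Q-values, then
        # read the reply aloud.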
        # Add a text input field for query
        query = st.text_input("Query: ", key="input")

        # Process the query if it's not empty
        if query:
            with st.spinner("typing....."):
                response = generate_response(query)
                st.session_state.past.append(query)
                st.session_state.generated.append(response)
                # Detect sentiment
                user_sentiment = chatbot.detect_sentiment(query)

                # Update mood history / mood_trend
                chatbot.update_mood_history()
                mood_trend = chatbot.check_mood_trend()
    
                # Define rewards
                if user_sentiment in ["Positive", "Moderately Positive"]:
                    if mood_trend == "increased":
                        reward = +1
                        mood_trend_symbol = " ⬆️"
                    elif mood_trend == "unchanged":
                        reward = +0.8
                        mood_trend_symbol = ""
                    else:  # decreased
                        reward = -0.2
                        mood_trend_symbol = " ⬇️"
                else:
                    if mood_trend == "increased":
                        reward = +1
                        mood_trend_symbol = " ⬆️"
                    elif mood_trend == "unchanged":
                        reward = -0.2
                        mood_trend_symbol = ""
                    else:  # decreased
                        reward = -1
                        mood_trend_symbol = " ⬇️"
    
                print(
                    f"mood_trend - sentiment - reward: {mood_trend} - {user_sentiment} - 🛑{reward}🛑"
                )
    
                # Update Q-values
                chatbot.update_q_values(
                    user_sentiment, reward, user_sentiment
                )

                # Convert the response to speech
                speech_fp = text_to_speech(response)
                # Play the speech
                st.audio(speech_fp, format='audio/mp3')

## Display the conversation history: user prompts and AI-generated responses
with response_container:
    if st.session_state['generated']:
        for i in range(len(st.session_state['generated'])):
            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
            message(st.session_state["generated"][i], key=str(i))
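
# To launch the app locally (hypothetical filename; requires the model_pipeline
# and q_learning_chatbot modules plus the imported packages to be installed):
#   streamlit run peacepal_app.py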