ASledziewska committed on
Commit 8465f96
1 Parent(s): 8ff5a08

Update app.py

Files changed (1)
  1. app.py +13 -6
app.py CHANGED
@@ -3,9 +3,10 @@ import streamlit as st
 from q_learning_chatbot import QLearningChatbot
 from xgb_mental_health import MentalHealthClassifier
 from bm25_retreive_question import QuestionRetriever as QuestionRetriever_bm25
-from Chromadb_storage import QuestionRetriever as QuestionRetriever_chromaDB
+from Chromadb_storage_JyotiNigam import QuestionRetriever as QuestionRetriever_chromaDB
 from llm_response_generator import LLLResponseGenerator
 import os
+
 # Streamlit UI
 st.title("FOMO Fix - RL-based Mental Health Assistant")
 
@@ -26,7 +27,7 @@ chatbot = QLearningChatbot(states, actions)
 # data_path = "/Users/jaelinlee/Documents/projects/fomo/input/data.csv"
 data_path = "data/data.csv"
 tokenizer_model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
-mental_classifier_model_path = "mental_health_model.pkl"
+mental_classifier_model_path = "app/mental_health_model.pkl"
 mental_classifier = MentalHealthClassifier(data_path, mental_classifier_model_path)
 
 
@@ -147,6 +148,10 @@ if user_message:
     ai_tone = chatbot.get_action(user_sentiment)
     print(ai_tone)
 
+    print(st.session_state.messages)
+
+
+
     # LLM Response Generator
     HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
 
@@ -154,18 +159,19 @@ if user_message:
     temperature = 0.1
     max_length = 128
 
+    # Collect all messages exchanged so far into a single text string
+    all_messages = "\n".join([message.get("content") for message in st.session_state.messages])
+
     #Question asked to the user: {question}
 
     template = """INSTRUCTIONS: {context}
 
     Respond to the user with a tone of {ai_tone}.
 
-    Question asked to the user: "None"
-
     Response by the user: {user_text}
     Response;
     """
-    context = "You are a mental health supporting non-medical assistant. Provide some advice and ask a relevant question back to the user."
+    context = f"You are a mental health supporting non-medical assistant. Provide some advice and ask a relevant question back to the user. {all_messages}"
 
     llm_response = llm_model.llm_inference(
         model_type="huggingface",
@@ -184,6 +190,7 @@ if user_message:
     else:
         llm_reponse_with_quesiton = llm_response
 
+    # Append the user and AI responses to the chat history
     st.session_state.messages.append({"role": "ai", "content": llm_reponse_with_quesiton})
 
     with st.chat_message("ai"):
@@ -220,7 +227,7 @@ with st.sidebar.expander('Behind the Scene', expanded=section_visible):
     st.write(f"- AI Tone: {st.session_state.ai_tone.capitalize()}")
     st.write(f"- Question retrieved from: {selected_retriever_option}")
     st.write(
-        f"- If the user feels negative or moderately negative, at the end of the AI response, it adds a mental health condition related question. The question is retrieved from DB. The categories of questions are limited to Depression, Anxiety, and ADHD which are most associated with FOMO related to excessive social media usage."
+        f"- If the user feels negative, moderately negative, or neutral, at the end of the AI response, it adds a mental health condition related question. The question is retrieved from DB. The categories of questions are limited to Depression, Anxiety, and ADHD which are most associated with FOMO related to excessive social media usage."
     )
     st.write(
         f"- Below q-table is continuously updated after each interaction with the user. If the user's mood increases, AI gets a reward. Else, AI gets a punishment."