jaelin215 committed
Commit bfa5372
1 Parent(s): 6df770d

updated prompt not to ask any question

Files changed (1)
app.py +29 -27
app.py CHANGED
@@ -7,6 +7,7 @@
 # - Updated to UI to show predicted mental health condition in behind the scence regardless of the ositive/negative sentiment
 ###
 
+from dotenv import load_dotenv, find_dotenv
 import pandas as pd
 import streamlit as st
 from q_learning_chatbot import QLearningChatbot
@@ -48,16 +49,16 @@ tokenizer_model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
 mental_classifier_model_path = "mental_health_model.pkl"
 mental_classifier = MentalHealthClassifier(data_path, mental_classifier_model_path)
 
-# if not os.path.exists(mental_classifier_model_path):
-#     mental_classifier.initialize_tokenizer(tokenizer_model_name)
-#     X, y = mental_classifier.preprocess_data()
-#     y_test, y_pred = mental_classifier.train_model(X, y)
-#     mental_classifier.save_model()
-# else:
-#     mental_classifier.load_model()
-#     mental_classifier.initialize_tokenizer(tokenizer_model_name)  # Ensure tokenizer is initialized if loading model from pickle
-#     X, y = mental_classifier.preprocess_data()  # Preprocess data again if needed
-#     mental_classifier.model.fit(X, y)  # Fit the loaded model to the data
+if not os.path.exists(mental_classifier_model_path):
+    mental_classifier.initialize_tokenizer(tokenizer_model_name)
+    X, y = mental_classifier.preprocess_data()
+    y_test, y_pred = mental_classifier.train_model(X, y)
+    mental_classifier.save_model()
+else:
+    mental_classifier.load_model()
+    mental_classifier.initialize_tokenizer(tokenizer_model_name)  # Ensure tokenizer is initialized if loading model from pickle
+    # X, y = mental_classifier.preprocess_data()  # Preprocess data again if needed
+    # mental_classifier.model.fit(X, y)  # Fit the loaded model to the data
 
 # Function to display Q-table
 def display_q_table(q_values, states, actions):
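
The block uncommented above follows the usual train-or-load pattern: fit and pickle the classifier on the first run, then reuse the saved file on later runs. A minimal standalone sketch of that pattern with a toy scikit-learn pipeline (the file name, texts, and labels below are illustrative, not taken from app.py):

import os
import pickle

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

MODEL_PATH = "toy_model.pkl"
texts = ["I feel anxious all the time", "I cannot focus on anything", "I feel fine today"]
labels = ["anxiety", "adhd", "none"]

if not os.path.exists(MODEL_PATH):
    # First run: fit the model and persist it to disk.
    model = make_pipeline(TfidfVectorizer(), LogisticRegression(max_iter=1000))
    model.fit(texts, labels)
    with open(MODEL_PATH, "wb") as f:
        pickle.dump(model, f)
else:
    # Later runs: load the pickled model instead of retraining.
    with open(MODEL_PATH, "rb") as f:
        model = pickle.load(f)

print(model.predict(["I keep scrolling and feel left out"]))
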
@@ -94,6 +95,10 @@ def speech_recognition_callback():
 def remove_html_tags(text):
     # clean_text = re.sub("<.*?>", "", text)
     clean_text = re.sub(r'<.*?>|- |"|\\n', '', text)
+    # Remove indentation
+    clean_text = clean_text.strip()
+    # Remove new lines
+    clean_text = clean_text.replace('\n', ' ')
     return clean_text
 
 # Initialize memory
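
The two added lines normalize whitespace after the regex has stripped HTML tags, leading hyphen markers, double quotes, and literal backslash-n sequences. A quick standalone check of what the updated helper returns (the sample input is illustrative):

import re

def remove_html_tags(text):
    clean_text = re.sub(r'<.*?>|- |"|\\n', '', text)
    clean_text = clean_text.strip()             # drop leading/trailing whitespace
    clean_text = clean_text.replace('\n', ' ')  # collapse real newlines into spaces
    return clean_text

print(remove_html_tags('  <p>- "Take a short\\nbreak."</p>\nThen breathe.  '))
# -> 'Take a shortbreak. Then breathe.'
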
@@ -162,7 +167,6 @@ llama_guard_enabled = st.sidebar.checkbox(
     key="llama_guard_toggle",
 )
 
-
 # Update the session state based on the checkbox interaction
 st.session_state["llama_guard_enabled"] = llama_guard_enabled
 
@@ -212,6 +216,7 @@ if user_message:
         question = retriever.get_response(
             user_message, predicted_mental_category
         )
+        st.session_state.asked_questions.append(question)
         show_question = True
     else:
         show_question = False
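
The new append assumes an asked_questions list already exists in st.session_state (its initialization lives elsewhere in app.py and is not part of this diff). The usual Streamlit idiom for that kind of rerun-safe list looks roughly like this (the question value is illustrative):

import streamlit as st

# Guarded initialization so the list survives Streamlit reruns.
if "asked_questions" not in st.session_state:
    st.session_state.asked_questions = []

question = "How often do you check social media before bed?"  # illustrative
if question not in st.session_state.asked_questions:
    st.session_state.asked_questions.append(question)

st.write(st.session_state.asked_questions)
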
@@ -260,7 +265,7 @@ if user_message:
     print(st.session_state.messages)
 
     # LLM Response Generator
-    HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
+    load_dotenv(find_dotenv())
 
     llm_model = LLLResponseGenerator()
     temperature = 0.5
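
Swapping the direct os.getenv assignment for load_dotenv(find_dotenv()) loads a local .env file into the process environment, so the Hugging Face token can still be read from os.environ by whatever client LLLResponseGenerator uses internally. A minimal sketch of that flow, assuming a .env file containing HUGGINGFACEHUB_API_TOKEN=<token>:

import os
from dotenv import load_dotenv, find_dotenv

# find_dotenv() searches upward for a .env file; load_dotenv() copies its
# key=value pairs into os.environ without overriding variables already set.
load_dotenv(find_dotenv())

token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
print("HUGGINGFACEHUB_API_TOKEN loaded:", token is not None)
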
@@ -274,15 +279,15 @@ if user_message:
     # Question asked to the user: {question}
 
     template = """INSTRUCTIONS: {context}
-
-    Respond to the user with a tone of {ai_tone}.
-
-    Response by the user: {user_text}
+
+    Respond to the user with a tone of {ai_tone}.
+
+    Response by the user: {user_text}
     Response;
     """
-    context = f"You are a mental health supporting non-medical assistant. Provide some advice and ask a relevant question back to the user. {all_messages}"
+    context = f"You are a mental health supporting non-medical assistant. Provide brief advice. DO NOT ASK ANY QUESTION. DO NOT REPEAT YOURSELF. {all_messages}"  # and ask a relevant question back to the user
     # context = f"You are a Mindful Media Mentor, dedicated to providing compassionate support and guidance to users facing mental health challenges. Your goal is to foster a safe and understanding environment where users feel heard and supported. Draw from your expertise to offer practical advice and resources, and encourage users to explore their feelings and experiences openly. Your responses should aim to empower users to take positive steps towards their well-being. {all_messages}"
-
+
     llm_response = llm_model.llm_inference(
         model_type="huggingface",
         question=question,
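
The substantive change in this commit is the new context string: the assistant is now told to give brief advice and not to ask questions, since the follow-up question is appended separately from the retriever. The exact prompt assembly happens inside LLLResponseGenerator and is not shown in this diff; a rough standalone approximation using plain str.format (the tone and user text are illustrative):

template = """INSTRUCTIONS: {context}

Respond to the user with a tone of {ai_tone}.

Response by the user: {user_text}
Response;
"""

context = (
    "You are a mental health supporting non-medical assistant. "
    "Provide brief advice. DO NOT ASK ANY QUESTION. DO NOT REPEAT YOURSELF."
)
prompt = template.format(
    context=context,
    ai_tone="empathetic",
    user_text="I feel left out whenever I open Instagram.",
)
print(prompt)
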
@@ -337,16 +342,14 @@ if user_message:
         st.write(
             f"- Detected User Tone: {st.session_state.user_sentiment} ({st.session_state.mood_trend.capitalize()}{st.session_state.mood_trend_symbol})"
         )
-        # if st.session_state.show_question:
-        st.write(
-            f"- Possible Mental Condition: {st.session_state.predicted_mental_category.capitalize()}"
-        )
+        if st.session_state.show_question:
+            st.write(
+                f"- Possible Mental Condition: {st.session_state.predicted_mental_category.capitalize()}"
+            )
         st.write(f"- AI Tone: {st.session_state.ai_tone.capitalize()}")
-
-
+
         # Display Q-table
         st.dataframe(display_q_table(chatbot.q_values, states, actions))
-
         st.write("-----------------------")
         st.write(
             f"- Above q-table is continuously updated after each interaction with the user. If the user's mood increases, AI gets a reward. Else, AI gets a punishment."
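
display_q_table is defined earlier in app.py and its body is not part of this diff; it presumably reshapes the chatbot's Q-values into a DataFrame indexed by state and action so st.dataframe can render it. One plausible implementation, assuming q_values is a 2-D array of shape (len(states), len(actions)) (the state and action names below are illustrative, not necessarily those used in app.py):

import numpy as np
import pandas as pd

def display_q_table(q_values, states, actions):
    # Rows are states, columns are actions; cells hold the learned Q-estimates.
    return pd.DataFrame(q_values, index=states, columns=actions)

states = ["negative", "neutral", "positive"]
actions = ["empathetic", "encouraging", "motivational"]
q_values = np.zeros((len(states), len(actions)))
print(display_q_table(q_values, states, actions))
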
@@ -354,5 +357,4 @@ if user_message:
         st.write(f"- Question retrieved from: {selected_retriever_option}")
         st.write(
             f"- If the user feels negative, moderately negative, or neutral, at the end of the AI response, it adds a mental health condition related question. The question is retrieved from DB. The categories of questions are limited to Depression, Anxiety, ADHD, Social Media Addiction, Social Isolation, and Cyberbullying which are most associated with FOMO related to excessive social media usage."
-        )
-
+        )