jaelin215 committed on
Commit
709ba91
1 Parent(s): 2edb6cf

updated path

Browse files
Files changed (1) hide show
  1. streamlit_app.py +4 -4
streamlit_app.py CHANGED
@@ -42,11 +42,11 @@ chatbot = QLearningChatbot(states, actions)
42
 
43
  # Initialize MentalHealthClassifier
44
  # data_path = "/Users/jaelinlee/Documents/projects/fomo/input/data.csv"
45
- data_path = os.path.join("data", "processed", "data.csv")
46
  print(data_path)
47
 
48
  tokenizer_model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
49
- mental_classifier_model_path = "app/mental_health_model.pkl"
50
  mental_classifier = MentalHealthClassifier(data_path, mental_classifier_model_path)
51
 
52
 
@@ -110,7 +110,7 @@ if "asked_questions" not in st.session_state:
110
  st.session_state.asked_questions = []
111
  # Check if 'llama_guard_enabled' is already in session state, otherwise initialize it
112
  if "llama_guard_enabled" not in st.session_state:
113
- st.session_state["llama_guard_enabled"] = True # Default value to True
114
 
115
  # Select Question Retriever
116
  selected_retriever_option = st.sidebar.selectbox(
@@ -254,7 +254,7 @@ if user_message:
254
  load_dotenv(find_dotenv())
255
 
256
  llm_model = LLLResponseGenerator()
257
- temperature = 0.1
258
  max_length = 128
259
 
260
  # Collect all messages exchanged so far into a single text string
 
42
 
43
  # Initialize MentalHealthClassifier
44
  # data_path = "/Users/jaelinlee/Documents/projects/fomo/input/data.csv"
45
+ data_path = os.path.join("data", "data.csv")
46
  print(data_path)
47
 
48
  tokenizer_model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
49
+ mental_classifier_model_path = "mental_health_model.pkl"
50
  mental_classifier = MentalHealthClassifier(data_path, mental_classifier_model_path)
51
 
52
 
 
110
  st.session_state.asked_questions = []
111
  # Check if 'llama_guard_enabled' is already in session state, otherwise initialize it
112
  if "llama_guard_enabled" not in st.session_state:
113
+ st.session_state["llama_guard_enabled"] = False # Default value to False
114
 
115
  # Select Question Retriever
116
  selected_retriever_option = st.sidebar.selectbox(
 
254
  load_dotenv(find_dotenv())
255
 
256
  llm_model = LLLResponseGenerator()
257
+ temperature = 0.5
258
  max_length = 128
259
 
260
  # Collect all messages exchanged so far into a single text string