Spaces:
Sleeping
Sleeping
ASledziewska
committed on
Commit
·
4a70236
1
Parent(s):
a6a602a
updated API key reference
Browse files- app.py +5 -6
- llm_response_generator.py +14 -15
app.py
CHANGED
@@ -7,7 +7,6 @@
|
|
7 |
# - Updated the UI to show predicted mental health condition behind the scenes regardless of the positive/negative sentiment
|
8 |
###
|
9 |
|
10 |
-
from dotenv import load_dotenv, find_dotenv
|
11 |
import pandas as pd
|
12 |
import streamlit as st
|
13 |
from q_learning_chatbot import QLearningChatbot
|
@@ -251,7 +250,7 @@ if user_message:
|
|
251 |
print(st.session_state.messages)
|
252 |
|
253 |
# LLM Response Generator
|
254 |
-
|
255 |
|
256 |
llm_model = LLLResponseGenerator()
|
257 |
temperature = 0.5
|
@@ -265,10 +264,10 @@ if user_message:
|
|
265 |
# Question asked to the user: {question}
|
266 |
|
267 |
template = """INSTRUCTIONS: {context}
|
268 |
-
|
269 |
-
Respond to the user with a tone of {ai_tone}.
|
270 |
-
|
271 |
-
Response by the user: {user_text}
|
272 |
Response;
|
273 |
"""
|
274 |
context = f"You are a mental health supporting non-medical assistant. Provide some advice and ask a relevant question back to the user. {all_messages}"
|
|
|
7 |
# - Updated the UI to show predicted mental health condition behind the scenes regardless of the positive/negative sentiment
|
8 |
###
|
9 |
|
|
|
10 |
import pandas as pd
|
11 |
import streamlit as st
|
12 |
from q_learning_chatbot import QLearningChatbot
|
|
|
250 |
print(st.session_state.messages)
|
251 |
|
252 |
# LLM Response Generator
|
253 |
+
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
|
254 |
|
255 |
llm_model = LLLResponseGenerator()
|
256 |
temperature = 0.5
|
|
|
264 |
# Question asked to the user: {question}
|
265 |
|
266 |
template = """INSTRUCTIONS: {context}
|
267 |
+
|
268 |
+
Respond to the user with a tone of {ai_tone}.
|
269 |
+
|
270 |
+
Response by the user: {user_text}
|
271 |
Response;
|
272 |
"""
|
273 |
context = f"You are a mental health supporting non-medical assistant. Provide some advice and ask a relevant question back to the user. {all_messages}"
|
llm_response_generator.py
CHANGED
@@ -2,14 +2,13 @@
|
|
2 |
#- Author: Jaelin Lee
|
3 |
#- Date: Mar 16, 2024
|
4 |
#- Description: Calls HuggingFace API to generate natural response.
|
5 |
-
#- Credit: The initial code is from Abhishek Dutta.
|
6 |
-
# Most of the code is kept as he created.
|
7 |
# I only added a modification to convert it to class.
|
8 |
# And, I tweaked the prompt to feed into the `streamlit_app.py` file.
|
9 |
#---
|
10 |
|
11 |
import os
|
12 |
-
from dotenv import load_dotenv, find_dotenv
|
13 |
from langchain_community.llms import HuggingFaceHub
|
14 |
from langchain_community.llms import OpenAI
|
15 |
# from langchain.llms import HuggingFaceHub, OpenAI
|
@@ -23,7 +22,7 @@ class LLLResponseGenerator():
|
|
23 |
|
24 |
def __init__(self):
|
25 |
print("initialized")
|
26 |
-
|
27 |
|
28 |
def llm_inference(
|
29 |
self,
|
@@ -117,7 +116,7 @@ class LLLResponseGenerator():
|
|
117 |
|
118 |
if __name__ == "__main__":
|
119 |
# Please ensure you have a .env file available with 'HUGGINGFACEHUB_API_TOKEN' and 'OPENAI_API_KEY' values.
|
120 |
-
|
121 |
|
122 |
context = "You are a mental health supporting non-medical assistant. DO NOT PROVIDE any medical advice with conviction."
|
123 |
|
@@ -130,15 +129,15 @@ if __name__ == "__main__":
|
|
130 |
|
131 |
# The user may have signs of {questionnaire}.
|
132 |
template = """INSTRUCTIONS: {context}
|
133 |
-
|
134 |
-
Respond to the user with a tone of {ai_tone}.
|
135 |
-
|
136 |
Question asked to the user: {question}
|
137 |
-
|
138 |
-
Response by the user: {user_text}
|
139 |
-
|
140 |
Provide some advice and ask a relevant question back to the user.
|
141 |
-
|
142 |
Response;
|
143 |
"""
|
144 |
|
@@ -146,7 +145,7 @@ if __name__ == "__main__":
|
|
146 |
max_length = 128
|
147 |
|
148 |
model = LLLResponseGenerator()
|
149 |
-
|
150 |
|
151 |
llm_response = model.llm_inference(
|
152 |
model_type="huggingface",
|
@@ -159,5 +158,5 @@ if __name__ == "__main__":
|
|
159 |
temperature=temperature,
|
160 |
max_length=max_length,
|
161 |
)
|
162 |
-
|
163 |
-
print(llm_response)
|
|
|
2 |
#- Author: Jaelin Lee
|
3 |
#- Date: Mar 16, 2024
|
4 |
#- Description: Calls HuggingFace API to generate natural response.
|
5 |
+
#- Credit: The initial code is from Abhishek Dutta.
|
6 |
+
# Most of the code is kept as he created.
|
7 |
# I only added a modification to convert it to class.
|
8 |
# And, I tweaked the prompt to feed into the `streamlit_app.py` file.
|
9 |
#---
|
10 |
|
11 |
import os
|
|
|
12 |
from langchain_community.llms import HuggingFaceHub
|
13 |
from langchain_community.llms import OpenAI
|
14 |
# from langchain.llms import HuggingFaceHub, OpenAI
|
|
|
22 |
|
23 |
def __init__(self):
|
24 |
print("initialized")
|
25 |
+
|
26 |
|
27 |
def llm_inference(
|
28 |
self,
|
|
|
116 |
|
117 |
if __name__ == "__main__":
|
118 |
# Please ensure you have a .env file available with 'HUGGINGFACEHUB_API_TOKEN' and 'OPENAI_API_KEY' values.
|
119 |
+
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
|
120 |
|
121 |
context = "You are a mental health supporting non-medical assistant. DO NOT PROVIDE any medical advice with conviction."
|
122 |
|
|
|
129 |
|
130 |
# The user may have signs of {questionnaire}.
|
131 |
template = """INSTRUCTIONS: {context}
|
132 |
+
|
133 |
+
Respond to the user with a tone of {ai_tone}.
|
134 |
+
|
135 |
Question asked to the user: {question}
|
136 |
+
|
137 |
+
Response by the user: {user_text}
|
138 |
+
|
139 |
Provide some advice and ask a relevant question back to the user.
|
140 |
+
|
141 |
Response;
|
142 |
"""
|
143 |
|
|
|
145 |
max_length = 128
|
146 |
|
147 |
model = LLLResponseGenerator()
|
148 |
+
|
149 |
|
150 |
llm_response = model.llm_inference(
|
151 |
model_type="huggingface",
|
|
|
158 |
temperature=temperature,
|
159 |
max_length=max_length,
|
160 |
)
|
161 |
+
|
162 |
+
print(llm_response)
|