Yew Chong committed
Commit 8dd7b5f
1 Parent(s): 8373e84

fix streamlit secrets

Files changed (3):
  1. .gitignore +4 -1
  2. db_firestore.py +1 -1
  3. streamlit/app8.py +16 -3
.gitignore CHANGED
@@ -25,4 +25,7 @@ test*.html
 test*.ipynb
 
 ## Images
-*.png
+*.png
+
+# streamlit
+.streamlit/secrets.toml
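Note: the newly ignored file is Streamlit's local secrets store. A rough sketch of the local-development flow it enables (the key name is taken from db_firestore.py below; the exact contents of secrets.toml are not part of this commit and are only illustrative):

import streamlit as st

# Hypothetical .streamlit/secrets.toml, now kept out of git by this commit:
#   FIREBASE_CREDENTIAL = """{ "type": "service_account", "private_key": "...", ... }"""
# Streamlit parses that file and exposes it as the read-only mapping st.secrets.
cred_json = st.secrets["FIREBASE_CREDENTIAL"]  # raw JSON string, consumed in db_firestore.py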
db_firestore.py CHANGED
@@ -12,7 +12,7 @@ try:
 except TypeError:
     import streamlit as st
     os.environ["FIREBASE_CREDENTIAL"] = st.secrets["FIREBASE_CREDENTIAL"]
-    cred = credentials.Certificate(json.loads(os.environ.get("FIREBASE_CREDENTIAL")))
+    cred = credentials.Certificate(json.loads(os.environ.get("FIREBASE_CREDENTIAL"), strict=False))
 firebase_admin.initialize_app(cred,{'storageBucket': 'healthhack-store.appspot.com'}) # connecting to firebase
 
 
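Note: the one-line change passes strict=False to json.loads. With the default strict=True the parser rejects raw control characters inside JSON strings, which is what happens when a service-account credential is pasted into secrets.toml as a multi-line string and its private_key value ends up containing literal newlines. A minimal standalone sketch of the difference (the credential fragment here is made up):

import json

# Hypothetical credential fragment with a real newline inside a JSON string value,
# as happens when the key is pasted into secrets.toml as a multi-line TOML string.
raw = '{"private_key": "-----BEGIN PRIVATE KEY-----\nMIIB..."}'

try:
    json.loads(raw)  # strict=True (default): raises "Invalid control character"
except json.JSONDecodeError as err:
    print("strict parse fails:", err)

parsed = json.loads(raw, strict=False)  # the fix: control characters in strings are allowed
print(parsed["private_key"].startswith("-----BEGIN PRIVATE KEY-----"))  # True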
streamlit/app8.py CHANGED
@@ -27,7 +27,6 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
 
 import langchain_community.embeddings.huggingface
-# help(langchain_community.embeddings.huggingface)
 from langchain_community.embeddings.huggingface import HuggingFaceBgeEmbeddings
 from langchain_community.vectorstores import FAISS
 
@@ -111,6 +110,11 @@ Question:
 Remember, answer in a short and sweet manner, don't talk too much.
 Your reply:
 """
+if "TEMPLATE" not in st.session_state:
+    st.session_state.TEMPLATE = TEMPLATE
+
+with st.expander("Patient Prompt"):
+    TEMPLATE = st.text_area("Patient Prompt", value=TEMPLATE)
 
 prompt = PromptTemplate(
     input_variables = ["question", "context"],
@@ -129,7 +133,9 @@ if "memory" not in st.session_state:
 memory = st.session_state.memory
 
 
-if "chain" not in st.session_state:
+if ("chain" not in st.session_state
+    or
+    st.session_state.TEMPLATE != TEMPLATE):
     st.session_state.chain = (
         {
             "context": retriever | format_docs,
@@ -205,6 +211,11 @@ Student's final diagnosis:
 
 Your grade:
 """
+if "TEMPLATE2" not in st.session_state:
+    st.session_state.TEMPLATE2 = TEMPLATE2
+
+with st.expander("Grader Prompt"):
+    TEMPLATE2 = st.text_area("Grader Prompt", value=TEMPLATE2)
 
 prompt2 = PromptTemplate(
     input_variables = ["question", "context", "history"],
@@ -227,7 +238,9 @@ memory2 = st.session_state.memory2
 def x(_):
     return fake_history
 
-if "chain2" not in st.session_state:
+if ("chain2" not in st.session_state
+    or
+    st.session_state.TEMPLATE2 != TEMPLATE2):
     st.session_state.chain2 = (
         {
             "context": retriever | format_docs,