Spaces:
Runtime error
Runtime error
Yew Chong
committed on
Commit
•
81302bc
1
Parent(s):
e51f443
update date
Browse files
- app_final.py +53 -44
app_final.py
CHANGED
@@ -165,48 +165,7 @@ def init_patient_llm():
|
|
165 |
LLMChain(llm=llm, prompt=prompt, memory=st.session_state.memory, verbose=False)
|
166 |
)
|
167 |
|
168 |
-
def init_grader_llm():
|
169 |
-
st.session_state["patient_chat_history"] = "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])
|
170 |
-
## Grader
|
171 |
-
index_name = f"indexes/{st.session_state.scenario_list[st.session_state.selected_scenario]}/Rubric"
|
172 |
-
|
173 |
-
## Reset time
|
174 |
-
st.session_state.start_time = False
|
175 |
-
|
176 |
-
if "store2" not in st.session_state:
|
177 |
-
st.session_state.store2 = db.get_store(index_name, embeddings=embeddings)
|
178 |
-
if "retriever2" not in st.session_state:
|
179 |
-
st.session_state.retriever2 = st.session_state.store2.as_retriever(search_type="similarity", search_kwargs={"k":2})
|
180 |
-
|
181 |
-
## Re-init history
|
182 |
-
st.session_state["patient_chat_history"] = "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])
|
183 |
-
|
184 |
-
if ("chain2" not in st.session_state
|
185 |
-
or
|
186 |
-
st.session_state.TEMPLATE2 != TEMPLATE2):
|
187 |
-
st.session_state.chain2 = (
|
188 |
-
RunnableParallel({
|
189 |
-
"context": st.session_state.retriever2 | format_docs,
|
190 |
-
# "history": RunnableLambda(lambda _: "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])),
|
191 |
-
"history": (get_patient_chat_history),
|
192 |
-
"question": RunnablePassthrough(),
|
193 |
-
}) |
|
194 |
-
|
195 |
-
# LLMChain(llm=llm_i, prompt=prompt2, verbose=False ) #|
|
196 |
-
LLMChain(llm=llm_gpt4, prompt=prompt2, verbose=False ) #|
|
197 |
-
| {
|
198 |
-
"json": itemgetter("text"),
|
199 |
-
"text": (
|
200 |
-
LLMChain(
|
201 |
-
llm=llm,
|
202 |
-
prompt=PromptTemplate(
|
203 |
-
input_variables=["text"],
|
204 |
-
template="Interpret the following JSON of the student's grades, and do a write-up for each section.\n\n```json\n{text}\n```"),
|
205 |
-
verbose=False)
|
206 |
-
)
|
207 |
-
}
|
208 |
-
)
|
209 |
-
|
210 |
|
211 |
login_info = {
|
212 |
"bob":"builder",
|
@@ -266,7 +225,7 @@ else:
|
|
266 |
if selected_scenario is None or selected_scenario < 0:
|
267 |
st.warning("Please select a scenario!")
|
268 |
else:
|
269 |
-
st.session_state.start_time = datetime.datetime.
|
270 |
states = ["store", "store2","retriever","retriever2","chain","chain2"]
|
271 |
for state_to_del in states:
|
272 |
if state_to_del in st.session_state:
|
@@ -375,7 +334,12 @@ else:
|
|
375 |
var x = setInterval(function() {{
|
376 |
var start_time_str = "{st.session_state.start_time}";
|
377 |
var start_date = new Date(start_time_str);
|
378 |
-
var curr_date = new Date();
|
|
|
|
|
|
|
|
|
|
|
379 |
var time_difference = curr_date - start_date;
|
380 |
var time_diff_secs = Math.floor(time_difference / 1000);
|
381 |
var time_left = {TIME_LIMIT} - time_diff_secs;
|
@@ -412,7 +376,52 @@ else:
|
|
412 |
interactive_container = st.container()
|
413 |
user_input_col ,r = st.columns([4,1])
|
414 |
def to_grader_llm():
|
|
|
|
|
|
|
415 |
init_grader_llm()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
416 |
set_scenario_tab_index(ScenarioTabIndex.GRADER_LLM)
|
417 |
|
418 |
with r:
|
|
|
165 |
LLMChain(llm=llm, prompt=prompt, memory=st.session_state.memory, verbose=False)
|
166 |
)
|
167 |
|
168 |
+
# def init_grader_llm():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
169 |
|
170 |
login_info = {
|
171 |
"bob":"builder",
|
|
|
225 |
if selected_scenario is None or selected_scenario < 0:
|
226 |
st.warning("Please select a scenario!")
|
227 |
else:
|
228 |
+
st.session_state.start_time = datetime.datetime.utcnow()
|
229 |
states = ["store", "store2","retriever","retriever2","chain","chain2"]
|
230 |
for state_to_del in states:
|
231 |
if state_to_del in st.session_state:
|
|
|
334 |
var x = setInterval(function() {{
|
335 |
var start_time_str = "{st.session_state.start_time}";
|
336 |
var start_date = new Date(start_time_str);
|
337 |
+
// var curr_date = new Date();
|
338 |
+
var utc_date_str = new Date().toUTCString().slice(0, -4);
|
339 |
+
var curr_date = new Date(utc_date_str);
|
340 |
+
// console.log(utc_date_str);
|
341 |
+
// console.log("curr date", curr_date);
|
342 |
+
// console.log("start date", start_date);
|
343 |
var time_difference = curr_date - start_date;
|
344 |
var time_diff_secs = Math.floor(time_difference / 1000);
|
345 |
var time_left = {TIME_LIMIT} - time_diff_secs;
|
|
|
376 |
interactive_container = st.container()
|
377 |
user_input_col ,r = st.columns([4,1])
|
378 |
def to_grader_llm():
|
379 |
+
if "chain2" in st.session_state:
|
380 |
+
del st.session_state.chain2
|
381 |
+
"""
|
382 |
init_grader_llm()
|
383 |
+
"""
|
384 |
+
st.session_state["patient_chat_history"] = "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])
|
385 |
+
## Grader
|
386 |
+
index_name = f"indexes/{st.session_state.scenario_list[st.session_state.selected_scenario]}/Rubric"
|
387 |
+
|
388 |
+
## Reset time
|
389 |
+
st.session_state.start_time = False
|
390 |
+
|
391 |
+
if "store2" not in st.session_state:
|
392 |
+
st.session_state.store2 = db.get_store(index_name, embeddings=embeddings)
|
393 |
+
if "retriever2" not in st.session_state:
|
394 |
+
st.session_state.retriever2 = st.session_state.store2.as_retriever(search_type="similarity", search_kwargs={"k":2})
|
395 |
+
|
396 |
+
## Re-init history
|
397 |
+
st.session_state["patient_chat_history"] = "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])
|
398 |
+
|
399 |
+
if ("chain2" not in st.session_state
|
400 |
+
or
|
401 |
+
st.session_state.TEMPLATE2 != TEMPLATE2):
|
402 |
+
st.session_state.chain2 = (
|
403 |
+
RunnableParallel({
|
404 |
+
"context": st.session_state.retriever2 | format_docs,
|
405 |
+
# "history": RunnableLambda(lambda _: "History\n" + '\n'.join([(sp_mapper.get(i.type, i.type) + ": "+ i.content) for i in st.session_state.memory.chat_memory.messages])),
|
406 |
+
"history": (get_patient_chat_history),
|
407 |
+
"question": RunnablePassthrough(),
|
408 |
+
}) |
|
409 |
+
|
410 |
+
# LLMChain(llm=llm_i, prompt=prompt2, verbose=False ) #|
|
411 |
+
LLMChain(llm=llm_gpt4, prompt=prompt2, verbose=False ) #|
|
412 |
+
| {
|
413 |
+
"json": itemgetter("text"),
|
414 |
+
"text": (
|
415 |
+
LLMChain(
|
416 |
+
llm=llm,
|
417 |
+
prompt=PromptTemplate(
|
418 |
+
input_variables=["text"],
|
419 |
+
template="Interpret the following JSON of the student's grades, and do a write-up for each section.\n\n```json\n{text}\n```"),
|
420 |
+
verbose=False)
|
421 |
+
)
|
422 |
+
}
|
423 |
+
)
|
424 |
+
|
425 |
set_scenario_tab_index(ScenarioTabIndex.GRADER_LLM)
|
426 |
|
427 |
with r:
|