DataRaptor committed
Commit • b87dbd1 • 1 Parent(s): e90caa9
Update app.py
app.py CHANGED
@@ -83,6 +83,7 @@ with st.sidebar:
     if verbose == True:
         retv_cnt = st.slider('Display N retrived Doc', min_value=0, max_value=32, value=0, step=1)
 
+        show_input = st.checkbox('Show Input of LLM', value=False)
 
 
 
@@ -134,6 +135,8 @@ def main():
         return
 
     response = resp['data']['responses'][0]['content']
+    reasoning = resp['data']['logs']['content']['llm']['reasoning']
+    llm_input = resp['data']['logs']['content']['llm']['llm_reasoning']
     context = resp['data']['logs']['content']['retrival_model']['matched_doc']
     context_prob = resp['data']['logs']['content']['retrival_model']['matched_prob']
 
@@ -146,8 +149,10 @@ def main():
 
     context = [str(round(b, 3)) + ': ' + a for a, b in zip (context, context_prob)]
     context = '\n\n===============================\n\n'.join(context)
-    response = f'###### Config: Context Checker Value: {ctx_checker_tmp}, LM Value: {lm_tmp}\n\n##### Retrived Context:\n{retrived}\n\n##### Matched Context:{clen}\n{context}\n\n'
+    response = f'###### Config: Context Checker Value: {ctx_checker_tmp}, LM Value: {lm_tmp}\n\n##### Retrived Context:\n{retrived}\n\n##### Response:\n{response}' # ##### Matched Context:{clen}\n{context}\n\n
 
+    if show_input:
+        response += '\n\n### LLM Input:\n' + llm_input
 
     # Display assistant response in chat message container
     with st.chat_message("assistant", avatar=None):
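Taken together, the three hunks implement a common Streamlit pattern: a sidebar checkbox toggles extra debug output, which is appended to the assistant message before rendering. A self-contained sketch of that pattern, using illustrative stand-in values for the backend fields:

import streamlit as st

with st.sidebar:
    verbose = st.checkbox('Verbose', value=False)
    show_input = False  # default, so the flag exists even when verbose is off
    if verbose:
        show_input = st.checkbox('Show Input of LLM', value=False)

response = 'final answer text'            # stand-in for resp[...]['content']
llm_input = 'prompt + retrieved context'  # stand-in for the logged LLM input

# Append the debug section only when the sidebar toggle is on.
if show_input:
    response += '\n\n### LLM Input:\n' + llm_input

# Display assistant response in chat message container
with st.chat_message("assistant", avatar=None):
    st.markdown(response)

One caveat visible in the diff itself: `show_input` is only assigned inside the `if verbose == True:` branch, so the later `if show_input:` check would raise a NameError whenever verbose is off, unless the flag is also defined elsewhere in app.py; the sketch above defaults it to False for that reason.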