Update app.py
app.py (CHANGED)
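
This commit reworks how the demo is configured and how example questions are offered: the `corpus_ids`-style settings are replaced by a comma-separated `corpus_keys` environment variable, the hand-rolled example buttons built from `st.columns(4)` and `st.button` give way to a `streamlit_pills` selector capped at `max_examples = 4`, the `VectaraQuery` client is now constructed from `cfg.api_key`, `cfg.corpus_keys`, and `cfg.prompt_name`, and `st.rerun()` is called after each assistant response so the chat redraws cleanly.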
```diff
@@ -3,8 +3,11 @@ from query import VectaraQuery
 import os
 
 import streamlit as st
+from streamlit_pills import pills
+
 from PIL import Image
 
+max_examples = 4
 
 def isTrue(x) -> bool:
     if isinstance(x, bool):
@@ -19,29 +22,40 @@ def launch_bot():
     def generate_streaming_response(question):
         response = vq.submit_query_streaming(question)
         return response
+
+    def show_example_questions():
+        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+            if selected_example:
+                st.session_state.ex_prompt = selected_example
+                st.session_state.first_turn = False
+                return True
+        return False
 
     if 'cfg' not in st.session_state:
-
+        corpus_keys = str(os.environ['corpus_keys']).split(',')
         cfg = OmegaConf.create({
-            '…
-            'corpus_ids': corpus_ids,
+            'corpus_keys': corpus_keys,
             'api_key': str(os.environ['api_key']),
             'title': os.environ['title'],
             'description': os.environ['description'],
             'source_data_desc': os.environ['source_data_desc'],
             'streaming': isTrue(os.environ.get('streaming', False)),
             'prompt_name': os.environ.get('prompt_name', None),
-            'examples': os.environ.get('examples', …
+            'examples': os.environ.get('examples', None)
         })
         st.session_state.cfg = cfg
-        st.session_state.…
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        example_messages = [example.strip() for example in cfg.examples.split(",")]
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0][:max_examples]
+
+        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
 
     cfg = st.session_state.cfg
     vq = st.session_state.vq
     st.set_page_config(page_title=cfg.title, layout="wide")
 
-
-    # with streamlit_analytics.track():
     # left side content
     with st.sidebar:
         image = Image.open('Vectara-logo.png')
@@ -61,46 +75,31 @@ def launch_bot():
     st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
     st.markdown(f"<center> <h4> {cfg.description} </h4> </center>", unsafe_allow_html=True)
 
-
     if "messages" not in st.session_state.keys():
         st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
 
-
-
-
-
-
-
-    example_messages = [example.strip() for example in cfg.examples.split(",")]
-
-    if len(example_messages) > 0:
-        st.markdown("<h6>Queries To Try:</h6>", unsafe_allow_html=True)
-        cols = st.columns(4)
-
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.rerun()
+
     # Display chat messages
     for message in st.session_state.messages:
         with st.chat_message(message["role"]):
             st.write(message["content"])
 
-    # …
-    if …
+    # select prompt from example question or user provided input
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
         st.session_state.messages.append({"role": "user", "content": prompt})
         with st.chat_message("user"):
             st.write(prompt)
-
-
-    for i, example in enumerate(example_messages):
-        button_pressed = False
-        with cols[i]:
-            if st.button(example):
-                prompt = example
-                button_pressed = True
-
-    if button_pressed:
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        with st.chat_message("user"):
-            st.write(prompt)
-
+        st.session_state.ex_prompt = None
+
     # Generate a new response if last message is not from assistant
     if st.session_state.messages[-1]["role"] != "assistant":
         with st.chat_message("assistant"):
@@ -113,6 +112,7 @@ def launch_bot():
             st.write(response)
             message = {"role": "assistant", "content": response}
             st.session_state.messages.append(message)
+            st.rerun()
 
 if __name__ == "__main__":
     launch_bot()
```
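Everything in the config block above is read from environment variables, which on a Hugging Face Space come from the Space's secrets and variables. A minimal sketch of the values `app.py` expects follows; the variable names are exactly the ones the code reads, while every value here is a made-up placeholder:

```python
# Placeholder configuration for app.py (all values are hypothetical).
import os

os.environ["corpus_keys"] = "corpus-key-1,corpus-key-2"  # comma-separated; app.py splits on ','
os.environ["api_key"] = "<your-vectara-api-key>"
os.environ["title"] = "Acme Docs Chat"
os.environ["description"] = "Chat with the Acme documentation"
os.environ["source_data_desc"] = "Acme product docs"
os.environ["streaming"] = "True"  # parsed by isTrue()
os.environ["examples"] = "What is Acme?, How do I install Acme?"  # first max_examples shown as pills
# "prompt_name" is optional; os.environ.get('prompt_name', None) defaults it to None.
```

On a Space these would be set as repository secrets and variables rather than in code.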
For readability, here is `app.py` after this commit (regions touched by the diff; unchanged stretches elided):

```python
from query import VectaraQuery
import os

import streamlit as st
from streamlit_pills import pills

from PIL import Image

max_examples = 4

def isTrue(x) -> bool:
    if isinstance(x, bool):
        ...  # unchanged body elided

def launch_bot():
    ...  # unchanged setup elided

    def generate_streaming_response(question):
        response = vq.submit_query_streaming(question)
        return response

    def show_example_questions():
        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
            if selected_example:
                st.session_state.ex_prompt = selected_example
                st.session_state.first_turn = False
                return True
        return False

    if 'cfg' not in st.session_state:
        corpus_keys = str(os.environ['corpus_keys']).split(',')
        cfg = OmegaConf.create({
            'corpus_keys': corpus_keys,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'description': os.environ['description'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            'prompt_name': os.environ.get('prompt_name', None),
            'examples': os.environ.get('examples', None)
        })
        st.session_state.cfg = cfg
        st.session_state.ex_prompt = None
        st.session_state.first_turn = True
        example_messages = [example.strip() for example in cfg.examples.split(",")]
        st.session_state.example_messages = [em for em in example_messages if len(em)>0][:max_examples]

        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        ...  # sidebar content elided

    st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
    st.markdown(f"<center> <h4> {cfg.description} </h4> </center>", unsafe_allow_html=True)

    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

    example_container = st.empty()
    with example_container:
        if show_example_questions():
            example_container.empty()
            st.rerun()

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # select prompt from example question or user provided input
    if st.session_state.ex_prompt:
        prompt = st.session_state.ex_prompt
    else:
        prompt = st.chat_input()
    if prompt:
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)
        st.session_state.ex_prompt = None

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            ...  # response generation elided
            st.write(response)
            message = {"role": "assistant", "content": response}
            st.session_state.messages.append(message)
            st.rerun()

if __name__ == "__main__":
    launch_bot()
```
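For reference, here is a self-contained sketch of the pills-then-rerun pattern the commit adopts, assuming `streamlit-pills` is installed; the example strings and any names not shown in the diff are invented for the demo:

```python
# Standalone sketch of the "example questions as pills" flow (demo values).
# Install: pip install streamlit streamlit-pills
# Run:     streamlit run pills_demo.py
import streamlit as st
from streamlit_pills import pills

EXAMPLES = ["What is Vectara?", "How does streaming work?"]  # invented examples

if "first_turn" not in st.session_state:
    st.session_state.first_turn = True
    st.session_state.ex_prompt = None

container = st.empty()
with container:
    if st.session_state.first_turn:
        # index=None means no pill is preselected on first render
        choice = pills("Queries to Try:", EXAMPLES, index=None)
        if choice:
            st.session_state.ex_prompt = choice
            st.session_state.first_turn = False
            container.empty()  # clear the pills row
            st.rerun()         # rerun so the choice is consumed as a chat prompt

# On the rerun, the stored pill (if any) takes the place of typed input
prompt = st.session_state.ex_prompt or st.chat_input()
if prompt:
    st.chat_message("user").write(prompt)
    st.session_state.ex_prompt = None  # consume it so it is not replayed
```

Stashing the selection in `st.session_state.ex_prompt` and forcing a rerun is what lets a pill click flow through the same code path as `st.chat_input()`, which is how the updated `app.py` handles it as well.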