import streamlit as st

from index import build_index, build_service_context, change_prompts, load_documents
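
# NOTE: build_index, build_service_context, change_prompts and load_documents come
# from this Space's local index.py module; judging by the service-context and
# streaming query-engine API used below, they presumably wrap a LlamaIndex
# retrieval pipeline over the university knowledge base.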

st.title("SAIRA: Student Affairs AI Response Assistant")
st.caption("Welcome to the SAIRA chatbot! This bot has knowledge about Innopolis University. Feel free to write your request!")
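
# Build the retrieval pipeline: load the knowledge-base documents, index them, and
# expose a streaming query engine with SAIRA's custom prompts applied via
# change_prompts().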
# Cache the engine across Streamlit reruns so the documents are not reloaded and the
# index is not rebuilt on every chat message.
@st.cache_resource
def load_docs_and_build_index():
    service_context = build_service_context()
    docs = load_documents()
    index = build_index(docs, service_context)
    query_engine = index.as_query_engine(streaming=True)
    change_prompts(query_engine)
    return query_engine


query_engine = load_docs_and_build_index()

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        resp = query_engine.query(prompt)
        message_placeholder = st.empty()
        full_response = ""
        # Stream the response chunk by chunk as the query engine generates it
        for text in resp.response_gen:
            full_response += text
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
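
# To run locally (assuming this script is saved as app.py in the Space repo):
#     streamlit run app.py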