import streamlit as st

from utils.api import get_chatgpt_response
from utils.chat_helpers import (
    process_pdf,
    initialize_vector_store,
    add_pdf_to_vector_store,
    process_txt,
    process_md,
    process_docx,
    process_csv,
    process_html,
    process_pptx,
    process_audio,
    parse_text,
)
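
# NOTE: get_chatgpt_response and the helpers above live in utils/ and are not
# shown in this file. As a rough sketch (assuming a LangChain + FAISS setup;
# the real utils.chat_helpers may be implemented differently), the vector-store
# helpers could look something like:
#
#     from langchain_openai import OpenAIEmbeddings
#     from langchain_community.vectorstores import FAISS
#     from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#     def initialize_vector_store():
#         # Start with an essentially empty index; chunks are added on upload.
#         return FAISS.from_texts([""], OpenAIEmbeddings())
#
#     def add_pdf_to_vector_store(text, vector_store):
#         # Chunk the extracted text and index the chunks for retrieval.
#         chunks = RecursiveCharacterTextSplitter(
#             chunk_size=1000, chunk_overlap=100
#         ).split_text(text)
#         vector_store.add_texts(chunks)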

# Initialize session state variables if not present
if "messages" not in st.session_state:
    st.session_state.messages = []
if "model" not in st.session_state:
    st.session_state.model = "gpt-4o-mini"
if "file_text" not in st.session_state:
    st.session_state.file_text = ""
if "vector_store" not in st.session_state:
    st.session_state.vector_store = initialize_vector_store()
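
# Streamlit re-runs this script from top to bottom on every interaction, so
# st.session_state is what keeps the chat history, selected model, extracted
# file text, and vector store alive between reruns.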
st.title(f"Chat with {st.session_state.model} AI Model") | |

# Sidebar for model selection and file upload
with st.sidebar:
    st.header("Configuration")
    st.session_state.model = st.selectbox(
        "Choose a model:",
        ("gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"),
    )

    uploaded_file = st.file_uploader(
        "Upload a file",
        type=["pdf", "txt", "md", "docx", "csv", "html", "pptx", "mp3", "wav"],
    )
    if uploaded_file is not None:
        # Lower-case the extension so files like "Report.PDF" are handled too.
        file_extension = uploaded_file.name.split(".")[-1].lower()
        if file_extension == "pdf":
            st.session_state.file_text = process_pdf(uploaded_file)
        elif file_extension == "txt":
            st.session_state.file_text = process_txt(uploaded_file)
        elif file_extension == "md":
            st.session_state.file_text = process_md(uploaded_file)
        elif file_extension == "docx":
            st.session_state.file_text = process_docx(uploaded_file)
        elif file_extension == "csv":
            st.session_state.file_text = process_csv(uploaded_file)
        elif file_extension == "html":
            st.session_state.file_text = process_html(uploaded_file)
        elif file_extension == "pptx":
            st.session_state.file_text = process_pptx(uploaded_file)
        elif file_extension in ["mp3", "wav"]:
            st.session_state.file_text = process_audio(uploaded_file)

        # Despite its name, this helper indexes the extracted text from any of
        # the supported formats, not just PDFs.
        add_pdf_to_vector_store(st.session_state.file_text, st.session_state.vector_store)
        st.success(f"{file_extension.upper()} file uploaded and processed successfully!")

    if st.button("Reset File"):
        st.session_state.file_text = ""
        st.session_state.vector_store = initialize_vector_store()
        st.success("File and vector store reset successfully!")

# Display chat history using the chat message API
for message in st.session_state.messages:
    st.chat_message(message["role"]).markdown(parse_text(message["content"]))

# Use st.chat_input for user input
if user_input := st.chat_input("You:"):
    st.session_state.messages.append({"role": "user", "content": user_input})
    response = get_chatgpt_response(
        st.session_state.messages,
        user_input,
        st.session_state.model,
        st.session_state.vector_store,
    )
    st.session_state.messages.append({"role": "assistant", "content": response})

    # Display the user message and the assistant's response
    st.chat_message("user").markdown(parse_text(user_input))
    st.chat_message("assistant").markdown(parse_text(response))