import streamlit as st
from utils.api import get_chatgpt_response
from utils.chat_helpers import (
process_pdf,
initialize_vector_store,
add_pdf_to_vector_store,
process_txt,
process_md,
process_docx,
process_csv,
process_html,
process_pptx,
process_audio,
parse_text
)
# Seed session state on first run; factories are called lazily so that
# initialize_vector_store() only executes when the key is actually missing.
_DEFAULTS = {
    "messages": list,
    "model": lambda: "gpt-4o-mini",
    "file_text": str,
    "vector_store": initialize_vector_store,
}
for _key, _factory in _DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _factory()
st.title(f"Chat with {st.session_state.model} AI Model")
# Sidebar: model selection, file upload/processing, and reset controls.
with st.sidebar:
    st.header("Configuration")
    st.session_state.model = st.selectbox(
        "Choose a model:",
        ("gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo", "gpt-4", "gpt-4-turbo")
    )
    # Map each supported extension to its processing helper (replaces the
    # long if/elif dispatch chain).
    _PROCESSORS = {
        "pdf": process_pdf,
        "txt": process_txt,
        "md": process_md,
        "docx": process_docx,
        "csv": process_csv,
        "html": process_html,
        "pptx": process_pptx,
        "mp3": process_audio,
        "wav": process_audio,
    }
    uploaded_file = st.file_uploader("Upload a file", type=["pdf", "txt", "md", "docx", "csv", "html", "pptx", "mp3", "wav"])
    if uploaded_file is not None:
        # Lower-case the extension: the original compared case-sensitively,
        # so "Report.PDF" matched no branch yet the vector store was still
        # updated with stale file_text from a previous upload.
        file_extension = uploaded_file.name.split('.')[-1].lower()
        processor = _PROCESSORS.get(file_extension)
        if processor is not None:
            st.session_state.file_text = processor(uploaded_file)
            # NOTE(review): despite the name, this helper indexes text from
            # any file type, not just PDFs — confirm against chat_helpers.
            add_pdf_to_vector_store(st.session_state.file_text, st.session_state.vector_store)
            st.success(f"{file_extension.upper()} file uploaded and processed successfully!")
        else:
            # Unreachable via the type filter above, but guards against
            # indexing stale text if the filter and dispatch table diverge.
            st.error(f"Unsupported file type: {file_extension}")
    if st.button("Reset File"):
        # Drop the extracted text and rebuild an empty vector store.
        st.session_state.file_text = ""
        st.session_state.vector_store = initialize_vector_store()
        st.success("File and vector store reset successfully!")
# Replay the stored conversation with the chat-message API; any role other
# than "user" is rendered as the assistant.
for entry in st.session_state.messages:
    role = "user" if entry["role"] == "user" else "assistant"
    st.chat_message(role).markdown(parse_text(entry["content"]))
# Read the next user turn, query the model, and append both sides to history.
if user_input := st.chat_input("You:"):
    st.session_state.messages.append({"role": "user", "content": user_input})
    response = get_chatgpt_response(st.session_state.messages, user_input, st.session_state.model, st.session_state.vector_store)
    st.session_state.messages.append({"role": "assistant", "content": response})
    # Render the new exchange immediately (history loop above already ran).
    st.chat_message("user").markdown(parse_text(user_input))
    st.chat_message("assistant").markdown(parse_text(response))