import streamlit as st
from pypdf import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
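
# NOTE (assumption): the flat `langchain.*` import paths above target a
# pre-0.1.x LangChain release. On newer releases the same classes ship in
# the langchain_community and langchain_openai packages, e.g.
# `from langchain_openai import ChatOpenAI, OpenAIEmbeddings`.
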
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages without a text layer
            # (e.g. scanned images), so guard against concatenating None
            text += page.extract_text() or ""
    return text

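
# A 1,000-character chunk with 200 characters of overlap is a common starting
# point: large enough to keep a paragraph together, with enough overlap that
# a sentence straddling a chunk boundary still appears whole in one chunk.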
def get_text_chunks(text):
    """Split the raw text into overlapping chunks sized for embedding."""
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return text_splitter.split_text(text)

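
# FAISS.from_texts embeds every chunk via the OpenAI API and builds the index
# in memory only; nothing is persisted, so closing the app discards it.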
def get_vectorstore(text_chunks, openai_api_key, embedding_model):
    """Embed the chunks with the selected OpenAI model and index them in FAISS."""
    # `openai_api_key=` is the keyword the classic langchain OpenAIEmbeddings
    # expects; the newer langchain_openai class accepts it too, so it is the
    # safer choice (plain `api_key=` is only recognised by the newer package)
    embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key, model=embedding_model)
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore

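
# ConversationalRetrievalChain answers in two steps: it first condenses the
# new question plus the chat history into a standalone question, then
# retrieves the most similar chunks and has the LLM answer from them.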
def get_conversation_chain(vectorstore, openai_api_key, chat_model):
    """Build a ConversationalRetrievalChain over the FAISS retriever."""
    llm = ChatOpenAI(openai_api_key=openai_api_key, model=chat_model)
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True
    )
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain

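
# The chain already tracks dialogue in ConversationBufferMemory;
# st.session_state.chat_history is a parallel copy kept purely so the
# transcript can be re-rendered on each Streamlit rerun.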
def handle_userinput(user_question):
    """Send the question through the chain and re-render the transcript."""
    st.session_state.chat_history.append({"role": "user", "content": user_question})

    # Invoke the chain (on newer LangChain releases, prefer .invoke(...))
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history.append({"role": "bot", "content": response['answer']})

    # Re-render the whole transcript with the HTML templates
    for message in st.session_state.chat_history:
        if message['role'] == 'user':
            st.write(user_template.replace("{{MSG}}", message['content']), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace("{{MSG}}", message['content']), unsafe_allow_html=True)

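
# Streamlit re-executes this script top to bottom on every interaction, so
# anything that must outlive a rerun (the chain, the transcript) is kept in
# st.session_state.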
def main():
    st.set_page_config(page_title="Chat with multiple PDFs", page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password")

    embedding_model_options = [
        "text-embedding-3-large",
        "text-embedding-3-small",
        "text-embedding-ada-002",
    ]
    selected_embedding_model = st.sidebar.selectbox("Select the Embedding Model", embedding_model_options)

    chat_model_options = [
        "gpt-4o-mini",
        "gpt-3.5-turbo-0125",
    ]
    selected_chat_model = st.sidebar.selectbox("Select the Chat Model", chat_model_options)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []

    st.header("Chat with multiple PDFs :books:")
    st.write("Enter your API key, pick your models, and upload your PDFs in the sidebar on the left.")

    user_question = st.text_area("Ask a question about your documents:", height=100)

    # Only send once the PDFs have been processed and a chain exists
    if st.button("Send") and user_question and st.session_state.conversation:
        handle_userinput(user_question)

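    # Sidebar workflow: upload PDFs, then extract -> chunk -> embed -> build chain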
    with st.sidebar:
        st.subheader("Your documents")
        pdf_docs = st.file_uploader("Upload your PDFs here", accept_multiple_files=True)

        if pdf_docs and openai_api_key:
            if st.button("Process PDFs"):
                with st.spinner("Processing"):
                    raw_text = get_pdf_text(pdf_docs)
                    text_chunks = get_text_chunks(raw_text)
                    vectorstore = get_vectorstore(text_chunks, openai_api_key, selected_embedding_model)
                    # Store the chain in session_state so it survives reruns
                    st.session_state.conversation = get_conversation_chain(vectorstore, openai_api_key, selected_chat_model)
                st.success("PDFs processed successfully!")

if __name__ == '__main__':
    main()
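
# To run (filename assumed): streamlit run app.py
#
# htmlTemplates.py is not shown in this file. A minimal sketch (assumption:
# the real module may style things differently) only needs a `css` block and
# two template strings containing a {{MSG}} placeholder:
#
#     css = "<style>.chat-message { padding: 1rem; border-radius: 0.5rem; }</style>"
#     user_template = '<div class="chat-message user">{{MSG}}</div>'
#     bot_template = '<div class="chat-message bot">{{MSG}}</div>'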