gtaraja committed
Commit 2cbd3e7
1 Parent(s): bbc178f

Upload 5 files

Files changed (5):
  1. .env +2 -0
  2. app.py +103 -0
  3. htmlTemplates.py +44 -0
  4. readme.md +65 -0
  5. requirements.txt +14 -0
.env ADDED
@@ -0,0 +1,2 @@
+ OPENAI_API_KEY=
+ HUGGINGFACEHUB_API_TOKEN="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"
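app.py calls `load_dotenv()` at startup, so these two variables are read from the process environment: LangChain's `HuggingFaceHub` wrapper looks for `HUGGINGFACEHUB_API_TOKEN`, and the OpenAI classes would look for `OPENAI_API_KEY` if those code paths were re-enabled. A minimal sketch for checking that the file is picked up (not part of the commit):

```python
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from .env into the environment

# Fail early if the token the HuggingFaceHub LLM needs is missing.
assert os.getenv("HUGGINGFACEHUB_API_TOKEN"), "HUGGINGFACEHUB_API_TOKEN is not set in .env"
```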
app.py ADDED
@@ -0,0 +1,103 @@
+ import streamlit as st
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
+ from langchain.vectorstores import FAISS
+ #from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ from langchain.llms import HuggingFaceHub
+
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text
+
+
+ def get_text_chunks(text):
+     text_splitter = CharacterTextSplitter(
+         separator="\n",
+         chunk_size=1000,
+         chunk_overlap=200,
+         length_function=len
+     )
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+
+ def get_vectorstore(text_chunks):
+     #embeddings = OpenAIEmbeddings()
+     embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
+
+ def get_conversation_chain(vectorstore):
+     #llm = ChatOpenAI()
+     llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
+
+     memory = ConversationBufferMemory(
+         memory_key='chat_history', return_messages=True)
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=vectorstore.as_retriever(),
+         memory=memory)
+     return conversation_chain
+
+
+ def handle_userinput(user_question):
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Chat with multiple PDFs",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with multiple PDFs :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         pdf_docs = st.file_uploader(
+             "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 raw_text = get_pdf_text(pdf_docs)
+
+                 # get the text chunks
+                 text_chunks = get_text_chunks(raw_text)
+
+                 # create vector store
+                 vectorstore = get_vectorstore(text_chunks)
+
+                 # create conversation chain
+                 st.session_state.conversation = get_conversation_chain(
+                     vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
htmlTemplates.py ADDED
@@ -0,0 +1,44 @@
+ css = '''
+ <style>
+ .chat-message {
+     padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
+ }
+ .chat-message.user {
+     background-color: #2b313e
+ }
+ .chat-message.bot {
+     background-color: #475063
+ }
+ .chat-message .avatar {
+     width: 20%;
+ }
+ .chat-message .avatar img {
+     max-width: 78px;
+     max-height: 78px;
+     border-radius: 50%;
+     object-fit: cover;
+ }
+ .chat-message .message {
+     width: 80%;
+     padding: 0 1.5rem;
+     color: #fff;
+ }
+ '''
+
+ bot_template = '''
+ <div class="chat-message bot">
+     <div class="avatar">
+         <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
+
+ user_template = '''
+ <div class="chat-message user">
+     <div class="avatar">
+         <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
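The templates are plain HTML strings with a `{{MSG}}` placeholder; `handle_userinput` in app.py injects the CSS once and then fills the placeholder per message before rendering with `unsafe_allow_html=True`. A minimal standalone sketch of that usage (the message strings are placeholders, not part of the commit):

```python
import streamlit as st
from htmlTemplates import css, bot_template, user_template

st.write(css, unsafe_allow_html=True)  # inject the chat-bubble styles once per page

# Render one user message and one bot message by substituting the placeholder.
st.write(user_template.replace("{{MSG}}", "What is this PDF about?"), unsafe_allow_html=True)
st.write(bot_template.replace("{{MSG}}", "It describes the MultiPDF Chat App."), unsafe_allow_html=True)
```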
readme.md ADDED
@@ -0,0 +1,65 @@
+ # MultiPDF Chat App
+
+ > You can find the tutorial for this project on [YouTube](https://youtu.be/dXxQ0LR-3Hg).
+
+ ## Introduction
+ ------------
+ The MultiPDF Chat App is a Python application that allows you to chat with multiple PDF documents. You can ask questions about the PDFs using natural language, and the application will provide relevant responses based on the content of the documents. This app utilizes a language model to generate accurate answers to your queries. Please note that the app will only respond to questions related to the loaded PDFs.
+
+ ## How It Works
+ ------------
+
+ ![MultiPDF Chat App Diagram](./docs/PDF-LangChain.jpg)
+
+ The application follows these steps to provide responses to your questions:
+
+ 1. PDF Loading: The app reads multiple PDF documents and extracts their text content.
+
+ 2. Text Chunking: The extracted text is divided into smaller chunks that can be processed effectively.
+
+ 3. Language Model: The application utilizes a language model to generate vector representations (embeddings) of the text chunks.
+
+ 4. Similarity Matching: When you ask a question, the app compares it with the text chunks and identifies the most semantically similar ones.
+
+ 5. Response Generation: The selected chunks are passed to the language model, which generates a response based on the relevant content of the PDFs.
+
26
+ ## Dependencies and Installation
27
+ ----------------------------
28
+ To install the MultiPDF Chat App, please follow these steps:
29
+
30
+ 1. Clone the repository to your local machine.
31
+
32
+ 2. Install the required dependencies by running the following command:
33
+ ```
34
+ pip install -r requirements.txt
35
+ ```
36
+
37
+ 3. Obtain an API key from OpenAI and add it to the `.env` file in the project directory.
38
+ ```commandline
39
+ OPENAI_API_KEY=your_secrit_api_key
40
+ ```
41
+
42
+ ## Usage
43
+ -----
44
+ To use the MultiPDF Chat App, follow these steps:
45
+
46
+ 1. Ensure that you have installed the required dependencies and added the OpenAI API key to the `.env` file.
47
+
48
+ 2. Run the `main.py` file using the Streamlit CLI. Execute the following command:
49
+ ```
50
+ streamlit run app.py
51
+ ```
52
+
53
+ 3. The application will launch in your default web browser, displaying the user interface.
54
+
55
+ 4. Load multiple PDF documents into the app by following the provided instructions.
56
+
57
+ 5. Ask questions in natural language about the loaded PDFs using the chat interface.
58
+
59
+ ## Contributing
60
+ ------------
61
+ This repository is intended for educational purposes and does not accept further contributions. It serves as supporting material for a YouTube tutorial that demonstrates how to build this project. Feel free to utilize and enhance the app based on your own requirements.
62
+
63
+ ## License
64
+ -------
65
+ The MultiPDF Chat App is released under the [MIT License](https://opensource.org/licenses/MIT).
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ langchain==0.0.184
+ PyPDF2==3.0.1
+ python-dotenv==1.0.0
+ streamlit==1.18.1
+ openai==0.27.6
+ faiss-cpu==1.7.4
+ altair==4
+ tiktoken==0.4.0
+ # uncomment to use huggingface llms
+ # huggingface-hub==0.14.1
+
+ # uncomment to use instructor embeddings
+ # InstructorEmbedding==1.0.1
+ # sentence-transformers==2.2.2
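Note that app.py as committed uses `HuggingFaceInstructEmbeddings` and `HuggingFaceHub` rather than the OpenAI classes, so the commented-out packages above are needed at runtime. Assuming the pinned versions given in the comments, they can be installed with `pip install huggingface-hub==0.14.1 InstructorEmbedding==1.0.1 sentence-transformers==2.2.2`.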