Gosula committed
Commit eafbad4
1 Parent(s): ac8d44a

Upload 6 files

Files changed (6)
  1. .gitignore +160 -0
  2. .python-version +1 -0
  3. app.py +108 -0
  4. htmlTemplates.py +44 -0
  5. readme.md +65 -0
  6. requirements.txt +14 -0
.gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea
.python-version ADDED
@@ -0,0 +1 @@
+ 3.9
app.py ADDED
@@ -0,0 +1,108 @@
+ import os
+ import streamlit as st
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ from langchain.llms import HuggingFaceHub
+
+
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             # extract_text() can return None for image-only pages
+             text += page.extract_text() or ""
+     return text
+
+
+ def get_text_chunks(text):
+     text_splitter = CharacterTextSplitter(
+         separator="\n",
+         chunk_size=1000,
+         chunk_overlap=200,
+         length_function=len
+     )
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+
+ def get_vectorstore(text_chunks):
+     embeddings = OpenAIEmbeddings()
+
+     # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
+
+ def get_conversation_chain(vectorstore):
+     llm = ChatOpenAI()
+     # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 512})
+
+     memory = ConversationBufferMemory(
+         memory_key='chat_history', return_messages=True)
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=vectorstore.as_retriever(),
+         memory=memory
+     )
+     return conversation_chain
+
+
+ def handle_userinput(user_question):
+     # guard against questions asked before any PDFs have been processed
+     if st.session_state.conversation is None:
+         st.warning("Please upload and process your PDFs before asking a question.")
+         return
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
+ def main():
+     load_dotenv()
+     print(os.environ.get("OPENAI_API_KEY"))
+     st.set_page_config(page_title="Chat with multiple PDFs",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with multiple PDFs :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         pdf_docs = st.file_uploader(
+             "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 raw_text = get_pdf_text(pdf_docs)
+
+                 # get the text chunks
+                 text_chunks = get_text_chunks(raw_text)
+
+                 # create vector store
+                 vectorstore = get_vectorstore(text_chunks)
+
+                 # create conversation chain
+                 st.session_state.conversation = get_conversation_chain(
+                     vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
htmlTemplates.py ADDED
@@ -0,0 +1,44 @@
+ css = '''
+ <style>
+ .chat-message {
+     padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
+ }
+ .chat-message.user {
+     background-color: #2b313e
+ }
+ .chat-message.bot {
+     background-color: #475063
+ }
+ .chat-message .avatar {
+     width: 20%;
+ }
+ .chat-message .avatar img {
+     max-width: 78px;
+     max-height: 78px;
+     border-radius: 50%;
+     object-fit: cover;
+ }
+ .chat-message .message {
+     width: 80%;
+     padding: 0 1.5rem;
+     color: #fff;
+ }
+ '''
+
+ bot_template = '''
+ <div class="chat-message bot">
+     <div class="avatar">
+         <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
+
+ user_template = '''
+ <div class="chat-message user">
+     <div class="avatar">
+         <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
readme.md ADDED
@@ -0,0 +1,65 @@
+ # MultiPDF Chat App
+
+ > You can find the tutorial for this project on [YouTube](https://youtu.be/dXxQ0LR-3Hg).
+
+ ## Introduction
+ ------------
+ The MultiPDF Chat App is a Python application that allows you to chat with multiple PDF documents. You can ask questions about the PDFs using natural language, and the application will provide relevant responses based on the content of the documents. This app utilizes a language model to generate accurate answers to your queries. Please note that the app will only respond to questions related to the loaded PDFs.
+
+ ## How It Works
+ ------------
+
+ ![MultiPDF Chat App Diagram](./docs/PDF-LangChain.jpg)
+
+ The application follows these steps to provide responses to your questions (a condensed code sketch follows the list):
+
+ 1. PDF Loading: The app reads multiple PDF documents and extracts their text content.
+
+ 2. Text Chunking: The extracted text is divided into smaller chunks that can be processed effectively.
+
+ 3. Language Model: The application utilizes a language model to generate vector representations (embeddings) of the text chunks.
+
+ 4. Similarity Matching: When you ask a question, the app compares it with the text chunks and identifies the most semantically similar ones.
+
+ 5. Response Generation: The selected chunks are passed to the language model, which generates a response based on the relevant content of the PDFs.
+
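+ The steps above correspond to the pipeline in `app.py`. The sketch below is a condensed view of that pipeline built from the same LangChain components; `pdf_files` is a placeholder for the list of uploaded PDF file objects:
+
+ ```python
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+
+ # pdf_files: placeholder for the uploaded PDF file objects (e.g. from st.file_uploader)
+ # 1. PDF loading: extract raw text from every page of every uploaded file
+ text = "".join(page.extract_text() or "" for pdf in pdf_files for page in PdfReader(pdf).pages)
+
+ # 2. Text chunking: split the text into overlapping chunks
+ chunks = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200).split_text(text)
+
+ # 3. Embeddings + 4. similarity matching: embed the chunks and index them in a FAISS vector store
+ vectorstore = FAISS.from_texts(texts=chunks, embedding=OpenAIEmbeddings())
+
+ # 5. Response generation: a conversational retrieval chain answers from the matched chunks
+ chain = ConversationalRetrievalChain.from_llm(
+     llm=ChatOpenAI(),
+     retriever=vectorstore.as_retriever(),
+     memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
+ )
+ answer = chain({"question": "What are these documents about?"})["answer"]
+ ```
+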
+ ## Dependencies and Installation
+ ----------------------------
+ To install the MultiPDF Chat App, please follow these steps:
+
+ 1. Clone the repository to your local machine.
+
+ 2. Install the required dependencies by running the following command:
+ ```
+ pip install -r requirements.txt
+ ```
+
+ 3. Obtain an API key from OpenAI and add it to the `.env` file in the project directory.
+ ```commandline
+ OPENAI_API_KEY=your_secret_api_key
+ ```
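+
+ The app reads this key at startup via `python-dotenv`. If you want to confirm the key is being picked up, a minimal check (run from the project root) looks like this:
+ ```python
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()  # reads OPENAI_API_KEY from the .env file in the current directory
+ assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY not found - check your .env file"
+ ```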
+
+ ## Usage
+ -----
+ To use the MultiPDF Chat App, follow these steps:
+
+ 1. Ensure that you have installed the required dependencies and added the OpenAI API key to the `.env` file.
+
+ 2. Run the `app.py` file using the Streamlit CLI. Execute the following command:
+ ```
+ streamlit run app.py
+ ```
+
+ 3. The application will launch in your default web browser, displaying the user interface.
+
+ 4. Load multiple PDF documents into the app by following the provided instructions.
+
+ 5. Ask questions in natural language about the loaded PDFs using the chat interface.
+
+ ## Contributing
+ ------------
+ This repository is intended for educational purposes and does not accept further contributions. It serves as supporting material for a YouTube tutorial that demonstrates how to build this project. Feel free to utilize and enhance the app based on your own requirements.
+
+ ## License
+ -------
+ The MultiPDF Chat App is released under the [MIT License](https://opensource.org/licenses/MIT).
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ langchain==0.0.184
+ PyPDF2==3.0.1
+ python-dotenv==1.0.0
+ streamlit==1.18.1
+ openai==0.27.6
+ faiss-cpu==1.7.4
+ altair==4
+ tiktoken==0.4.0
+ # uncomment to use huggingface llms
+ # huggingface-hub==0.14.1
+
+ # uncomment to use instructor embeddings
+ # InstructorEmbedding==1.0.1
+ # sentence-transformers==2.2.2