tony346 committed on
Commit
d396d3b
1 Parent(s): 0850cd6

Update app.py

Files changed (1)
  1. app.py +157 -99
app.py CHANGED
@@ -1,108 +1,166 @@
- # Import necessary modules for processing documents, embeddings, Q&A, etc. from the 'langchain' library.
  from dotenv import load_dotenv
- load_dotenv()  # Load environment variables from a .env file.
- from langchain.document_loaders import PyPDFLoader  # For loading and reading PDF documents.
- from langchain.text_splitter import RecursiveCharacterTextSplitter  # For splitting large texts into smaller chunks.
- from langchain.vectorstores import Chroma  # Vector storage system for embeddings.
  from langchain.llms import CTransformers # For loading transformer models.
- # from InstructorEmbedding import INSTRUCTOR  # Instructor-style embeddings; unused in this file.
- from langchain.embeddings import HuggingFaceInstructEmbeddings  # Embeddings from HuggingFace models with instructions.
  from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
- from langchain.embeddings import LlamaCppEmbeddings  # Embeddings using the Llama model.
- from langchain.chains import RetrievalQA  # Q&A retrieval system.
- from langchain.embeddings import OpenAIEmbeddings  # Embeddings from OpenAI models.
- from langchain.vectorstores import FAISS  # Another vector storage system for embeddings.
-
- # Import Streamlit for creating the web application and modules for file handling.
- import streamlit as st  # Main library for creating the web application.
- import tempfile  # For creating temporary directories and files.
- import os  # For handling file and directory paths.
-
- # Import a handler for streaming outputs.
- from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler  # For live updates in the Streamlit app.
-
- st.title("ChatPDF")
-
- st.markdown("""
- ChatPDF is a web application that answers questions about a PDF document. Upload a PDF file and type a question in the input box; the app then uses a language model to generate an answer.
- """)
-
- # Create a visual separator in the app.
- st.write("---")
-
- # Add a file uploader widget for users to upload their PDF files.
- uploaded_file = st.sidebar.file_uploader("Upload your PDF file!", type=['pdf'])
- # Another visual separator after the file uploader.
- st.write("---")
-
- # Convert the uploaded PDF into a list of page documents.
- def pdf_to_document(uploaded_file):
-     # Create a temporary directory for storing the uploaded PDF.
-     temp_dir = tempfile.TemporaryDirectory()
-     temp_filepath = os.path.join(temp_dir.name, uploaded_file.name)
-
-     # Save the uploaded PDF to the temporary path.
-     with open(temp_filepath, "wb") as f:
-         f.write(uploaded_file.getvalue())
-
-     # Load the PDF and split it into individual pages.
-     loader = PyPDFLoader(temp_filepath)
-     pages = loader.load_and_split()
-     return pages
-
- # Check whether a user has uploaded a file.
- if uploaded_file is not None:
-     # Convert the uploaded PDF into page documents.
-     pages = pdf_to_document(uploaded_file)
-
-     # Split the document into smaller textual chunks.
-     text_splitter = RecursiveCharacterTextSplitter(
-         chunk_size = 300,  # Size of each chunk.
-         chunk_overlap = 20,  # How much consecutive chunks overlap.
-         length_function = len  # Function used to measure text length.
      )
-     texts = text_splitter.split_documents(pages)

-     ## Below are examples of different embedding techniques, commented out.

      # Load the desired embeddings model.
      embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                         model_kwargs={'device': 'cpu'})
-
-     # Load the textual chunks into the vector store.
-     print('texts = ', texts)
-     print('embeddings = ', embeddings)
-     db = FAISS.from_documents(texts, embeddings)
-     # db = Chroma.from_documents(texts, embeddings)
-
-     # Custom handler that streams LLM output live to the Streamlit app.
-     from langchain.callbacks.base import BaseCallbackHandler
-     class StreamHandler(BaseCallbackHandler):
-         def __init__(self, container, initial_text=""):
-             self.container = container  # Streamlit container that displays the text.
-             self.text = initial_text
-         def on_llm_new_token(self, token: str, **kwargs) -> None:
-             self.text += token  # Append each new token.
-             self.container.markdown(self.text)  # Render the accumulated text.
-
-     # Header for the Q&A section of the web app.
-     st.header("Ask the PDF a question!")
-     question = st.text_input('Type your question')
-
-     if st.button('Ask'):
-         # Display a spinner while the question is processed.
-         with st.spinner('Processing...'):
-             # Placeholder that displays the streamed answer.
-             chat_box = st.empty()
-             stream_handler = StreamHandler(chat_box)
-
-             # Initialize the LLM and the retrieval Q&A chain.
-             llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q2_K.bin", model_type="llama", callbacks=[stream_handler])
-             qa_chain = RetrievalQA.from_chain_type(llm, retriever=db.as_retriever())
-             # Get the answer to the user's question.
-             qa_chain({"query": question})
+ import streamlit as st
  from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
  from langchain.llms import CTransformers # For loading transformer models.
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
+ from langchain.vectorstores import FAISS
  from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
+ from langchain.chat_models import ChatOpenAI
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ from langchain.llms import HuggingFaceHub
+
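+ # The helpers below turn each supported upload into plain text, split that
+ # text into overlapping chunks, embed the chunks into a FAISS index, and
+ # expose the index to the LLM through a ConversationalRetrievalChain.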
+ def get_pdf_text(pdf_docs):
+     # Concatenate the extracted text of every page in the uploaded PDF.
+     text = ""
+     pdf_reader = PdfReader(pdf_docs)
+     for page in pdf_reader.pages:
+         text += page.extract_text() or ""  # guard: some pages yield no extractable text
+     return text
+
+
+ def get_text_chunks(text):
+     # Split the corpus into 1000-character chunks with 200 characters of
+     # overlap so sentences that straddle a boundary stay retrievable.
+     text_splitter = CharacterTextSplitter(
+         separator="\n",
+         chunk_size=1000,
+         chunk_overlap=200,
+         length_function=len
      )
+     chunks = text_splitter.split_text(text)
+     return chunks

+ def get_vectorstore(text_chunks):
      # Load the desired embeddings model.
      embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                         model_kwargs={'device': 'cpu'})
+     # embeddings = OpenAIEmbeddings()
+     # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
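+ # Note: FAISS.from_texts builds an in-memory index, so the store is rebuilt
+ # on every "Process" click and is not persisted between sessions.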
+ def get_conversation_chain(vectorstore):
+     # llm = ChatOpenAI()
+     # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
+     llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q2_K.bin", model_type="llama")
+     memory = ConversationBufferMemory(
+         memory_key='chat_history', return_messages=True)
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=vectorstore.as_retriever(),
+         memory=memory
+     )
+     return conversation_chain
+
+
+ def handle_userinput(user_question):
+     # ConversationalRetrievalChain expects its input under the "question" key.
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+
+     # Messages alternate user/assistant, so even indices are the user's turns.
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
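+ # Loaders for the remaining upload types; each returns plain text that main()
+ # appends to the same corpus as the PDF text.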
+ def get_text_file(docs):
+     # Streamlit uploads are file-like objects yielding bytes; decode to text.
+     text = docs.read().decode('utf-8')
+     return text
+
+ def get_csv_file(docs):
+     # Flatten each CSV row into a sentence-like line:
+     # "<first column value> <col> is <value> <col> is <value> ..."
+     import pandas as pd
+     text = ''
+     data = pd.read_csv(docs)
+     for index, row in data.iterrows():
+         item_name = str(row.iloc[0])  # positional access; label-based row[0] is deprecated in pandas
+         row_text = item_name
+         for col_name in data.columns[1:]:
+             row_text += '{} is {} '.format(col_name, row[col_name])
+         text += row_text + '\n'
+     return text
+
+ def get_json_file(docs):
+     # docs is an uploaded file-like object, so parse it directly
+     # instead of treating it as a filesystem path.
+     import json
+     text = ''
+     json_data = json.load(docs)
+
+     for f_key, f_value in json_data.items():
+         # Assumes each top-level value is iterable (e.g. a list of records).
+         for s_value in f_value:
+             text += str(f_key) + str(s_value)
+             text += '\n'
+     return text
+
+ def get_hwp_file(docs):
+     # HWP extraction is not implemented yet; return empty text so the
+     # string concatenation in main() does not fail on None.
+     return ''
+
+ def get_docs_file(docs):
+     # .docx extraction is not implemented yet.
+     return ''
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Chat with multiple PDFs",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with multiple PDFs :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader(
+             "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process"):
+             with st.spinner("Processing"):
+                 # Dispatch each upload on the MIME type reported by the browser.
+                 raw_text = ""
+                 for file in docs:
+                     if file.type == 'text/plain':
+                         # file is .txt
+                         raw_text += get_text_file(file)
+                     elif file.type == 'application/octet-stream':
+                         # file is .pdf (some environments report 'application/pdf' instead)
+                         raw_text += get_pdf_text(file)
+                     elif file.type == 'text/csv':
+                         # file is .csv
+                         raw_text += get_csv_file(file)
+                     elif file.type == 'application/json':
+                         # file is .json
+                         raw_text += get_json_file(file)
+                     elif file.type == 'application/x-hwp':
+                         # file is .hwp
+                         raw_text += get_hwp_file(file)
+                     elif file.type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
+                         # file is .docx
+                         raw_text += get_docs_file(file)
+
+                 # get the text chunks
+                 text_chunks = get_text_chunks(raw_text)
+
+                 # create vector store
+                 vectorstore = get_vectorstore(text_chunks)
+
+                 # create conversation chain, kept in session_state so it
+                 # survives Streamlit reruns
+                 st.session_state.conversation = get_conversation_chain(
+                     vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
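
To try this revision locally, two pieces must be supplied alongside app.py: the quantized weights file llama-2-7b-chat.ggmlv3.q2_K.bin referenced in get_conversation_chain (presumably placed in the working directory), and an htmlTemplates.py module defining css, bot_template, and user_template, which is imported but not part of this commit. With both in place the app starts with:

    streamlit run app.py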