evanrsl committed
Commit
a322dc1
1 Parent(s): 9a5ca89

Create app.py

Files changed (1)
app.py +272 -0
app.py ADDED
@@ -0,0 +1,272 @@
+ import gradio as gr
+ import os
+
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain_community.llms import HuggingFaceEndpoint
+
+ from pathlib import Path
+ import chromadb
+ from unidecode import unidecode
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ import tqdm
+ import accelerate
+ import re
+
+
+ # default_persist_directory = './chroma_HF/'
+ list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1"]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+
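+ # Pipeline overview: load PDFs -> split into chunks -> embed into an in-memory
+ # Chroma collection -> answer questions with a ConversationalRetrievalChain
+ # backed by a Hugging Face inference endpoint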
+ # Load PDF documents and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     # Processing for one document only
+     # loader = PyPDFLoader(file_path)
+     # pages = loader.load()
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     # text_splitter = RecursiveCharacterTextSplitter(chunk_size = 600, chunk_overlap = 50)
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap)
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
+
+
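+ # HuggingFaceEmbeddings() with no arguments uses its default sentence-transformers
+ # model; the ephemeral Chroma client keeps the collection in memory only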
+ # Create vector database
+ def create_db(splits, collection_name):
+     embedding = HuggingFaceEmbeddings()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
+
+
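+ # HuggingFaceEndpoint calls the hosted Inference API, so a Hugging Face token
+ # (typically the HUGGINGFACEHUB_API_TOKEN environment variable) must be set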
+ # Initialize langchain LLM chain
+ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             load_in_8bit=True,
+         )
+     else:
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+         )
+
+     # progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key='answer',
+         return_messages=True
+     )
+     # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+     # progress(0.8, desc="Defining retrieval chain...")
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         # combine_docs_chain_kwargs={"prompt": your_prompt})
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     # progress(0.9, desc="LLM Chain Initialized!")
+     return qa_chain
+
+
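+ # Chroma collection names must be 3-63 characters and start/end with an
+ # alphanumeric character, hence the sanitizing below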
+ # Generate collection name for vector database
+ # - Use filepath as input, ensuring unicode text
+ def create_collection_name(filepath):
+     # Extract filename without extension
+     collection_name = Path(filepath).stem
+     # Fix potential issues from naming convention
+     ## Remove space
+     collection_name = collection_name.replace(" ", "-")
+     ## ASCII transliterations of Unicode text
+     collection_name = unidecode(collection_name)
+     ## Remove special characters
+     # collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
+     collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
+     ## Limit length to 50 characters
+     collection_name = collection_name[:50]
+     ## Minimum length of 3 characters
+     if len(collection_name) < 3:
+         collection_name = collection_name + 'xyz'
+     ## Enforce start and end as alphanumeric character
+     if not collection_name[0].isalnum():
+         collection_name = 'A' + collection_name[1:]
+     if not collection_name[-1].isalnum():
+         collection_name = collection_name[:-1] + 'Z'
+     print('Filepath: ', filepath)
+     print('Collection name: ', collection_name)
+     return collection_name
+
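+ # Flatten Gradio's (user, bot) history tuples into plain strings for the chain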
+ def format_chat_history(message, chat_history):
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+
+
+ def conversation(qa_chain, message, history):
+     formatted_chat_history = format_chat_history(message, history)
+     # print("formatted_chat_history", formatted_chat_history)
+
+     # Generate response using QA chain
+     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
+     response_answer = response["answer"]
+     if response_answer.find("Helpful Answer:") != -1:
+         response_answer = response_answer.split("Helpful Answer:")[-1]
+     response_sources = response["source_documents"]
+     response_source1 = response_sources[0].page_content.strip()
+     response_source2 = response_sources[1].page_content.strip()
+     response_source3 = response_sources[2].page_content.strip()
+     # Langchain sources are zero-based
+     response_source1_page = response_sources[0].metadata["page"] + 1
+     response_source2_page = response_sources[1].metadata["page"] + 1
+     response_source3_page = response_sources[2].metadata["page"] + 1
+     # print('chat response: ', response_answer)
+     # print('DB source', response_sources)
+
+     # Append user message and response to chat history
+     new_history = history + [(message, response_answer)]
+     # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
+     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
+
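+ # "Save settings" handler: builds the vector database from the uploaded PDFs
+ # and initializes the QA chain in a single pass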
+ def save_settings(list_file_obj, chunk_size, chunk_overlap, llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     # Create list of documents (when valid)
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     # Create collection_name for vector database
+     progress(0.1, desc="Creating collection name...")
+     collection_name = create_collection_name(list_file_path[0])
+     progress(0.25, desc="Loading document...")
+     # Load document and create splits
+     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+     # Create or load vector database
+     progress(0.3, desc="Generating vector database...")
+     # global vector_db
+     vector_db = create_db(doc_splits, collection_name)
+     progress(0.5, desc="Vector database generated! Initializing LLM chain...")
+
+     # print("llm_option", llm_option)
+     llm_name = list_llm[llm_option]
+     print("llm_name: ", llm_name)
+     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db)
+     progress(0.9, desc="LLM chain initialized!")
+     return vector_db, collection_name, qa_chain, "Complete! Please go to the Chat tab."
+
+
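+ # Gradio UI: a Settings tab (document upload and model parameters) and a Chat
+ # tab showing the conversation plus the retrieved document references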
+ def demo():
+     with gr.Blocks(theme="base") as demo:
+         vector_db = gr.State()
+         qa_chain = gr.State()
+         collection_name = gr.State()
+
+         gr.Markdown(
+             """<center><h2>Chat with PDF using open-source LLMs</h2></center>
+             <h3>Ask any questions about your PDF documents</h3>
+             <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
+             Please make sure the settings are correct, save them to initialize the chatbot, then go to the Chat tab to start the conversation.
+             <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models can take some time to generate an output.<br>
+             """)
+         with gr.Tab("Settings"):
+             with gr.Row():
+                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+
+             with gr.Accordion("Advanced options - Document text splitter", open=False):
+                 with gr.Row():
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                 with gr.Row():
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+             with gr.Row():
+                 llm_btn = gr.Radio(list_llm_simple,
+                     label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
+             with gr.Accordion("Advanced options - LLM model", open=False):
+                 with gr.Row():
+                     slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                 with gr.Row():
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max tokens", info="Model max tokens", interactive=True)
+                 with gr.Row():
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-k samples", info="Model top-k samples", interactive=True)
+             with gr.Row():
+                 llm_progress = gr.Textbox(value="", label="Initialization status")
+             with gr.Row():
+                 save_btn = gr.Button("Save settings")
+
+         with gr.Tab("Chat"):
+             chatbot = gr.Chatbot(height=500)
+             with gr.Accordion("Advanced - Document references", open=False):
+                 with gr.Row():
+                     doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                     source1_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                     source2_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
+                     source3_page = gr.Number(label="Page", scale=1)
+             with gr.Row():
+                 msg = gr.Textbox(placeholder="Type message", container=True)
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.ClearButton([msg, chatbot])
+
+         save_btn.click(save_settings,
+             inputs=[document, slider_chunk_size, slider_chunk_overlap, llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+             outputs=[vector_db, collection_name, qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
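+
+         # Each handler's return values map positionally onto its `outputs`
+         # list; the clearing lambda returns one value per output component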
+         # Chatbot events
+         msg.submit(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         submit_btn.click(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+     demo.queue().launch(debug=True)
+
+
+ if __name__ == "__main__":
+     demo()