TroglodyteDerivations committed on
Commit a4ad042
1 Parent(s): f79c8ca

Create app.py

Files changed (1)
  1. app.py +329 -0
app.py ADDED
@@ -0,0 +1,329 @@
+ import gradio as gr
+ import os
+
+ # Read the Hugging Face API token from the environment (e.g., a Space secret).
+ # It must be set before launch; it is used by the HF endpoint created below.
+ HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
+
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain_community.llms import HuggingFaceEndpoint
+
+ from pathlib import Path
+ import chromadb
+ from unidecode import unidecode
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ import tqdm
+ import accelerate
+ import re
+
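+ # High-level flow (RAG): load PDFs -> split them into chunks -> embed the
+ # chunks into a Chroma vector store -> answer questions with a
+ # ConversationalRetrievalChain pairing a retriever over that store with a
+ # hosted LLM endpoint.
+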
+ # default_persist_directory = './chroma_HF/'
+ list_llm = ["mistralai/Mistral-7B-Instruct-v0.2",
+             "mistralai/Mixtral-8x7B-Instruct-v0.1",
+             "mistralai/Mistral-7B-Instruct-v0.1",
+             "tiiuae/falcon-7b-instruct",
+             ]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+
+ # Load PDF documents and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     # Processing for one document only
+     # loader = PyPDFLoader(file_path)
+     # pages = loader.load()
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     # text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap)
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
+
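+ # Illustrative call (hypothetical file name):
+ #   load_doc(["report.pdf"], chunk_size=600, chunk_overlap=40)
+ #   -> list of LangChain Document chunks, each carrying page metadata
+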
+ # Create vector database
+ def create_db(splits, collection_name):
+     embedding = HuggingFaceEmbeddings()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
+
+ # Load vector database
+ def load_db():
+     embedding = HuggingFaceEmbeddings()
+     vectordb = Chroma(
+         # persist_directory=default_persist_directory,
+         embedding_function=embedding)
+     return vectordb
+
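+ # Note: chromadb.EphemeralClient() keeps the collection in memory only, so the
+ # vector database is rebuilt on every app restart (no persist_directory is set).
+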
+ # Initialize langchain LLM chain
+ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     progress(0.1, desc="Initializing HF tokenizer...")
+     # HuggingFacePipeline uses a local model
+     # Note: it will download the model locally...
+     # tokenizer = AutoTokenizer.from_pretrained(llm_model)
+     # progress(0.5, desc="Initializing HF pipeline...")
+     # pipeline = transformers.pipeline(
+     #     "text-generation",
+     #     model=llm_model,
+     #     tokenizer=tokenizer,
+     #     torch_dtype=torch.bfloat16,
+     #     trust_remote_code=True,
+     #     device_map="auto",
+     #     # max_length=1024,
+     #     max_new_tokens=max_tokens,
+     #     do_sample=True,
+     #     top_k=top_k,
+     #     num_return_sequences=1,
+     #     eos_token_id=tokenizer.eos_token_id
+     # )
+     # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
+
+     # HuggingFaceEndpoint uses HF inference endpoints
+     progress(0.5, desc="Initializing HF Hub...")
+     # Use of trust_remote_code as model_kwargs
+     # Warning: langchain issue
+     # URL: https://github.com/langchain-ai/langchain/issues/6080
+     if llm_model in ["mistralai/Mistral-7B-Instruct-v0.2",
+                      "mistralai/Mixtral-8x7B-Instruct-v0.1",
+                      "mistralai/Mistral-7B-Instruct-v0.1",
+                      "tiiuae/falcon-7b-instruct"]:
+         llm = HuggingFaceEndpoint(
+             repo_id=llm_model,
+             huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             load_in_8bit=True,
+         )
+     else:
+         raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
+     progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key='answer',
+         return_messages=True
+     )
+     # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+     progress(0.8, desc="Defining retrieval chain...")
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         # combine_docs_chain_kwargs={"prompt": your_prompt})
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     progress(0.9, desc="Done!")
+     return qa_chain
+
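+ # The chain above condenses each follow-up question using the buffered chat
+ # history, retrieves the most similar chunks from vector_db, and "stuff"s them
+ # into a single prompt for the LLM.
+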
+ # Generate collection name for vector database
+ #  - Use filepath as input, ensuring unicode text
+ def create_collection_name(filepath):
+     # Extract filename without extension
+     collection_name = Path(filepath).stem
+     # Fix potential issues from naming convention
+     ## Remove space
+     collection_name = collection_name.replace(" ", "-")
+     ## ASCII transliterations of Unicode text
+     collection_name = unidecode(collection_name)
+     ## Remove special characters
+     # collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
+     collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
+     ## Limit length to 50 characters
+     collection_name = collection_name[:50]
+     ## Minimum length of 3 characters
+     if len(collection_name) < 3:
+         collection_name = collection_name + 'xyz'
+     ## Enforce start and end as alphanumeric character
+     if not collection_name[0].isalnum():
+         collection_name = 'A' + collection_name[1:]
+     if not collection_name[-1].isalnum():
+         collection_name = collection_name[:-1] + 'Z'
+     print('Filepath: ', filepath)
+     print('Collection name: ', collection_name)
+     return collection_name
+
+
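+ # Illustrative example (hypothetical file name):
+ #   create_collection_name("docs/My Report (2024).pdf") -> "My-Report-2024Z"
+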
+ # Initialize database
+ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
+     # Create list of documents (when valid)
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     # Create collection_name for vector database
+     progress(0.1, desc="Creating collection name...")
+     collection_name = create_collection_name(list_file_path[0])
+     progress(0.25, desc="Loading document...")
+     # Load document and create splits
+     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+     # Create or load vector database
+     progress(0.5, desc="Generating vector database...")
+     # global vector_db
+     vector_db = create_db(doc_splits, collection_name)
+     progress(0.9, desc="Done!")
+     return vector_db, collection_name, "Complete!"
+
+
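+ # The returned vector_db and collection_name are stored in gr.State holders in
+ # demo(), so later event handlers can reuse them across interactions.
+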
+ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     # print("llm_option", llm_option)
+     llm_name = list_llm[llm_option]
+     print("llm_name: ", llm_name)
+     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
+     return qa_chain, "Complete!"
+
+
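+ # Convert Gradio's [(user, bot), ...] history pairs into the flat
+ # "User: ..." / "Assistant: ..." strings expected by the retrieval chain.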
+ def format_chat_history(message, chat_history):
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+
+
+ def conversation(qa_chain, message, history):
+     formatted_chat_history = format_chat_history(message, history)
+     # print("formatted_chat_history", formatted_chat_history)
+
+     # Generate response using QA chain
+     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
+     response_answer = response["answer"]
+     if response_answer.find("Helpful Answer:") != -1:
+         response_answer = response_answer.split("Helpful Answer:")[-1]
+     response_sources = response["source_documents"]
+     response_source1 = response_sources[0].page_content.strip()
+     response_source2 = response_sources[1].page_content.strip()
+     response_source3 = response_sources[2].page_content.strip()
+     # Langchain sources are zero-based
+     response_source1_page = response_sources[0].metadata["page"] + 1
+     response_source2_page = response_sources[1].metadata["page"] + 1
+     response_source3_page = response_sources[2].metadata["page"] + 1
+     # print('chat response: ', response_answer)
+     # print('DB source', response_sources)
+
+     # Append user message and response to chat history
+     new_history = history + [(message, response_answer)]
+     # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
+     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
+
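+ # The return tuple above maps one-to-one onto the Gradio outputs wired in
+ # demo(): updated chain state, cleared textbox, new chat history, and three
+ # (source excerpt, page number) reference pairs.
+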
+ def upload_file(file_obj):
+     list_file_path = []
+     for idx, file in enumerate(file_obj):
+         file_path = file.name
+         list_file_path.append(file_path)
+     # print(file_path)
+     # initialize_database(file_path, progress)
+     return list_file_path
+
+
+ def demo():
+     with gr.Blocks(theme="base") as demo:
+         vector_db = gr.State()
+         qa_chain = gr.State()
+         collection_name = gr.State()
+
+         gr.Markdown(
+         """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</h2></center>
+         <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
+         <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
+         When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity.
+         <br><b>Warning:</b> This Space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models below (free inference endpoints) can take some time to generate an output.<br>
+         """)
+         with gr.Tab("Step 1 - Document pre-processing"):
+             with gr.Row():
+                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+                 # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+             with gr.Row():
+                 db_type = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
+             with gr.Accordion("Advanced options - Document text splitter", open=False):
+                 with gr.Row():
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                 with gr.Row():
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+             with gr.Row():
+                 db_progress = gr.Textbox(label="Vector database initialization", value="None")
+             with gr.Row():
+                 db_btn = gr.Button("Generate vector database...")
+
+
+         with gr.Tab("Step 2 - QA chain initialization"):
+             with gr.Row():
+                 llm_btn = gr.Radio(list_llm_simple,
+                     label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
+             with gr.Accordion("Advanced options - LLM model", open=False):
+                 with gr.Row():
+                     slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                 with gr.Row():
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                 with gr.Row():
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+             with gr.Row():
+                 llm_progress = gr.Textbox(value="None", label="QA chain initialization")
+             with gr.Row():
+                 qachain_btn = gr.Button("Initialize question-answering chain...")
+
+         with gr.Tab("Step 3 - Conversation with chatbot"):
+             chatbot = gr.Chatbot(height=300)
+             with gr.Accordion("Advanced - Document references", open=False):
+                 with gr.Row():
+                     doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                     source1_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                     source2_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
+                     source3_page = gr.Number(label="Page", scale=1)
+             with gr.Row():
+                 msg = gr.Textbox(placeholder="Type message", container=True)
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.ClearButton([msg, chatbot])
+
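+         # Event wiring: each handler returns values in the same order as its
+         # outputs list; the .then() after qachain_btn resets the chatbot and
+         # reference fields once a new QA chain is initialized.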
+         # Preprocessing events
+         # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
+         db_btn.click(initialize_database,
+             inputs=[document, slider_chunk_size, slider_chunk_overlap],
+             outputs=[vector_db, collection_name, db_progress])
+         qachain_btn.click(initialize_LLM,
+             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+             outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+
+         # Chatbot events
+         msg.submit(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         submit_btn.click(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+             queue=False)
+     demo.queue().launch(debug=True)
+
+
+ if __name__ == "__main__":
+     demo()