Gurnam-AI committed on
Commit
4fb80b3
1 Parent(s): 53a4bb8

Create app.py

Files changed (1)
  1. app.py +387 -0
app.py ADDED
@@ -0,0 +1,387 @@
import gradio as gr
import os
import tempfile  # needed by get_credentials() below

from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain_google_vertexai import VertexAI

from pathlib import Path
import chromadb
from unidecode import unidecode
import re


def get_credentials():
    """Write the service-account JSON from the environment to a temporary file
    and return its path, as expected by GOOGLE_APPLICATION_CREDENTIALS."""
    creds_json_str = os.getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON")
    if creds_json_str is None:
        raise ValueError("GOOGLE_APPLICATION_CREDENTIALS_JSON not found in environment")

    # Create a temporary file holding the credentials JSON
    with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as temp:
        temp.write(creds_json_str)
        temp_filename = temp.name

    return temp_filename

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_credentials()
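
# Setup sketch (assumption: a Vertex AI service-account key; the filename is
# illustrative). Export the key as a single JSON string before launching:
#   export GOOGLE_APPLICATION_CREDENTIALS_JSON="$(cat service-account.json)"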


list_llm = ["gemini-pro"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]


# Load PDF documents and create doc splits
def load_doc(list_file_path, chunk_size, chunk_overlap):
    # Load every PDF and collect all pages
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    # Split the pages into overlapping chunks
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
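
# Usage sketch (file names are illustrative, defaults match the UI sliders):
#   splits = load_doc(["report.pdf", "annex.pdf"], chunk_size=600, chunk_overlap=40)
# Each split is a langchain Document that keeps its page metadata, which the
# chat tab later uses for the source references.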


# Create vector database
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings()
    # EphemeralClient keeps the collection in memory only; nothing is persisted
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
        # persist_directory=default_persist_directory
    )
    return vectordb
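
# Usage sketch (collection name is illustrative):
#   vector_db = create_db(splits, "Annual-Report-2023")
# Note: with no arguments, HuggingFaceEmbeddings typically downloads its
# default sentence-transformers model (all-mpnet-base-v2) on first use.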


# Load vector database (not called in this app; kept for a persisted setup)
def load_db():
    embedding = HuggingFaceEmbeddings()
    vectordb = Chroma(
        # persist_directory=default_persist_directory,
        embedding_function=embedding)
    return vectordb


# Initialize the langchain LLM chain
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.5, desc="Initializing LLM...")
    # Gemini via Vertex AI; the generation parameters come from the UI sliders
    llm = VertexAI(
        model_name=llm_model,
        temperature=temperature,
        max_output_tokens=max_tokens,
        top_k=top_k,
    )
    # Alternative backends: a local HuggingFacePipeline (downloads the model),
    # or a HuggingFaceEndpoint for hosted open-source models (e.g.
    # Mixtral-8x7B-Instruct, zephyr-7b-gemma, phi-2, TinyLlama, Llama-2-7b-chat).
    # Caveats: larger models exceed the free inference endpoints, Llama-2
    # requires a Pro subscription, and phi-2 needs trust_remote_code=True,
    # which langchain has had issues with:
    # https://github.com/langchain-ai/langchain/issues/6080

    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        # combine_docs_chain_kwargs={"prompt": your_prompt}
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain
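
# Invocation sketch (question text is illustrative): with
# return_source_documents=True, the chain returns a dict holding the keys
# that conversation() below unpacks:
#   result = qa_chain({"question": "What is this document about?", "chat_history": []})
#   result["answer"], result["source_documents"]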


# Generate a collection name for the vector database
# - Uses the filepath as input, ensuring unicode-safe text
def create_collection_name(filepath):
    # Extract the filename without its extension
    collection_name = Path(filepath).stem
    # Fix potential issues with naming conventions
    ## Replace spaces
    collection_name = collection_name.replace(" ", "-")
    ## ASCII transliteration of Unicode text
    collection_name = unidecode(collection_name)
    ## Collapse runs of special characters
    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
    ## Limit the length to 50 characters
    collection_name = collection_name[:50]
    ## Enforce a minimum length of 3 characters
    if len(collection_name) < 3:
        collection_name = collection_name + 'xyz'
    ## Enforce alphanumeric start and end characters
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    print('Filepath: ', filepath)
    print('Collection name: ', collection_name)
    return collection_name
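
# Example: "Annual Report 2023.pdf" -> stem "Annual Report 2023"
# -> "Annual-Report-2023", which satisfies Chroma's collection-naming
# constraints (3-63 characters, alphanumeric at both ends).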


# Initialize the vector database from the uploaded files
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    # Create the list of document filepaths (when valid)
    list_file_path = [x.name for x in list_file_obj if x is not None]
    # Create a collection_name for the vector database
    progress(0.1, desc="Creating collection name...")
    collection_name = create_collection_name(list_file_path[0])
    progress(0.25, desc="Loading document...")
    # Load the documents and create splits
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    # Create the vector database
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Done!")
    return vector_db, collection_name, "Complete!"
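
# The three return values feed the vector_db and collection_name gr.State
# holders and the db_progress textbox wired up in demo() below.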


def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    # llm_option is the index of the selected radio button
    llm_name = list_llm[llm_option]
    print("llm_name: ", llm_name)
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Complete!"


def format_chat_history(message, chat_history):
    # `message` is accepted for interface consistency but is not used here
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
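
# Shape sketch: [("Hi", "Hello!")] becomes ["User: Hi", "Assistant: Hello!"],
# the flat string form passed to the chain as chat_history.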


def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)

    # Generate a response using the QA chain
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    # Top-3 retrieved sources (assumes the retriever returns at least 3 documents)
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    # Langchain page numbers are zero-based
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1

    # Append the user message and response to the chat history
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page


def upload_file(file_obj):
    # Helper for an alternative upload button (not wired into the current UI)
    list_file_path = []
    for file in file_obj:
        file_path = file.name
        list_file_path.append(file_path)
    return list_file_path


def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown(
            """<center><h2>PDF-based chatbot</h2></center>
            <h3>Ask any question about your PDF documents</h3>""")
        gr.Markdown(
            """<b>Note:</b> This AI assistant, built with Langchain and Gemini (via Google Vertex AI), performs retrieval-augmented generation (RAG) on your PDF documents. \
            The user interface explicitly shows the individual steps to help you follow the RAG workflow.
            The chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity.<br>
            <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face, so some steps can take a while to complete.
            """)

        with gr.Tab("Step 1 - Upload PDF"):
            with gr.Row():
                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")

        with gr.Tab("Step 2 - Process document"):
            with gr.Row():
                # Distinct name so the "Generate" button below does not shadow it
                db_choice = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
            with gr.Accordion("Advanced options - Document text splitter", open=False):
                with gr.Row():
                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
                with gr.Row():
                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
            with gr.Row():
                db_progress = gr.Textbox(label="Vector database initialization", value="None")
            with gr.Row():
                db_btn = gr.Button("Generate vector database")

        with gr.Tab("Step 3 - Initialize QA chain"):
            with gr.Row():
                llm_btn = gr.Radio(list_llm_simple,
                    label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
            with gr.Accordion("Advanced options - LLM model", open=False):
                with gr.Row():
                    slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
                with gr.Row():
                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
                with gr.Row():
                    slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
            with gr.Row():
                llm_progress = gr.Textbox(value="None", label="QA chain initialization")
            with gr.Row():
                qachain_btn = gr.Button("Initialize Question Answering chain")

        with gr.Tab("Step 4 - Chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Advanced - Document references", open=False):
                with gr.Row():
                    doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                    source1_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                    source2_page = gr.Number(label="Page", scale=1)
                with gr.Row():
                    doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
                    source3_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                msg = gr.Textbox(placeholder="Type message (e.g. 'What is this document about?')", container=True)
            with gr.Row():
                submit_btn = gr.Button("Submit message")
                clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")

        # Preprocessing events
        db_btn.click(initialize_database,
            inputs=[document, slider_chunk_size, slider_chunk_overlap],
            outputs=[vector_db, collection_name, db_progress])
        qachain_btn.click(initialize_LLM,
            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
            outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)

        # Chatbot events
        msg.submit(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        submit_btn.click(conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
            inputs=None,
            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
            queue=False)
    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()