Jaman committed on
Commit
ddb4481
1 Parent(s): 095ae0a
Files changed (2)
  1. app.py +313 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,313 @@
+ import gradio as gr
+ import os
+ 
+ from langchain.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.vectorstores import Chroma
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.llms import HuggingFacePipeline
+ from langchain.chains import ConversationChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain.llms import HuggingFaceHub
+ 
+ from pathlib import Path
+ import chromadb
+ 
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ import tqdm
+ import accelerate
+ 
+ 
+ # default_persist_directory = './chroma_HF/'
+ list_llm = ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mistral-7B-Instruct-v0.1",
+     "HuggingFaceH4/zephyr-7b-beta", "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2",
+     "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct",
+     "google/flan-t5-xxl"
+ ]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+ 
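+ # e.g. os.path.basename turns "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ # into the display name "Mixtral-8x7B-Instruct-v0.1"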
+ 
+ # Load PDF document and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     # Processing for one document only
+     # loader = PyPDFLoader(file_path)
+     # pages = loader.load()
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     # text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap)
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
+ 
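+ # Example (hypothetical file): load_doc(["./sample.pdf"], 600, 40) yields
+ # LangChain Documents of ~600 characters with a 40-character overlap,
+ # each keeping its source page in metadata["page"]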
+ 
+ # Create vector database
+ def create_db(splits, collection_name):
+     embedding = HuggingFaceEmbeddings()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
+ 
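+ # Note: chromadb.EphemeralClient() is in-memory only, so the vector store
+ # lives for the session and is rebuilt on each new upload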
+ 
+ # Load vector database
+ def load_db():
+     embedding = HuggingFaceEmbeddings()
+     vectordb = Chroma(
+         # persist_directory=default_persist_directory,
+         embedding_function=embedding)
+     return vectordb
+ 
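+ # Note: load_db() has no callers in this file; without a persist_directory
+ # it would open an empty in-memory store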
+ 
+ # Initialize langchain LLM chain
+ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     progress(0.1, desc="Initializing HF tokenizer...")
+     # HuggingFacePipeline uses a local model
+     # Note: it will download the model locally...
+     # tokenizer = AutoTokenizer.from_pretrained(llm_model)
+     # progress(0.5, desc="Initializing HF pipeline...")
+     # pipeline = transformers.pipeline(
+     #     "text-generation",
+     #     model=llm_model,
+     #     tokenizer=tokenizer,
+     #     torch_dtype=torch.bfloat16,
+     #     trust_remote_code=True,
+     #     device_map="auto",
+     #     # max_length=1024,
+     #     max_new_tokens=max_tokens,
+     #     do_sample=True,
+     #     top_k=top_k,
+     #     num_return_sequences=1,
+     #     eos_token_id=tokenizer.eos_token_id
+     # )
+     # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
+ 
+     # HuggingFaceHub uses HF inference endpoints
+     progress(0.5, desc="Initializing HF Hub...")
+     # Use of trust_remote_code as model_kwargs
+     # Warning: langchain issue
+     # URL: https://github.com/langchain-ai/langchain/issues/6080
+     if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
+         )
+     elif llm_model == "microsoft/phi-2":
+         raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
+         # Unreachable until the langchain issue above is resolved; kept for reference
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+         )
+     elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
+         # Small model: cap generation at 250 new tokens
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
+         )
+     elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+         raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
+         # Unreachable: kept for reference
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+         )
+     else:
+         llm = HuggingFaceHub(
+             repo_id=llm_model,
+             # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
+             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+         )
+ 
+     progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         output_key='answer',
+         return_messages=True
+     )
+     # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+     progress(0.8, desc="Defining retrieval chain...")
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         # combine_docs_chain_kwargs={"prompt": your_prompt}
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     progress(0.9, desc="Done!")
+     return qa_chain
+ 
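+ # The chain rewrites each follow-up question using the chat history,
+ # retrieves matching chunks from vector_db, and "stuffs" them into the LLM
+ # prompt; its output dict includes "answer" and "source_documents"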
+ 
+ # Initialize database
+ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
+     # Create list of documents (when valid)
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     # Create collection_name for vector database
+     progress(0.1, desc="Creating collection name...")
+     collection_name = Path(list_file_path[0]).stem
+     # Fix potential issues from naming convention
+     collection_name = collection_name.replace(" ", "-")
+     collection_name = collection_name[:50]
+     # print('list_file_path: ', list_file_path)
+     print('Collection name: ', collection_name)
+     progress(0.25, desc="Loading document...")
+     # Load document and create splits
+     doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
+     # Create or load vector database
+     progress(0.5, desc="Generating vector database...")
+     # global vector_db
+     vector_db = create_db(doc_splits, collection_name)
+     progress(0.9, desc="Done!")
+     return vector_db, collection_name, "Complete!"
+ 
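+ # Note: Chroma also enforces its own collection-name rules (3-63 chars,
+ # alphanumeric at both ends), so some sanitized names could still be rejected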
+ 
+ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     # print("llm_option", llm_option)
+     llm_name = list_llm[llm_option]
+     print("llm_name: ", llm_name)
+     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
+     return qa_chain, "Complete!"
+ 
+ 
+ def format_chat_history(message, chat_history):
+     # Note: 'message' is unused; only past (user, bot) turns are formatted
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+ 
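+ # e.g. [("Hi", "Hello!")] -> ["User: Hi", "Assistant: Hello!"]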
190
+
191
+
192
+ def conversation(qa_chain, message, history):
193
+ formatted_chat_history = format_chat_history(message, history)
194
+ #print("formatted_chat_history",formatted_chat_history)
195
+
196
+ # Generate response using QA chain
197
+ response = qa_chain({"question": message, "chat_history": formatted_chat_history})
198
+ response_answer = response["answer"]
199
+ response_sources = response["source_documents"]
200
+ response_source1 = response_sources[0].page_content.strip()
201
+ response_source2 = response_sources[1].page_content.strip()
202
+ # Langchain sources are zero-based
203
+ response_source1_page = response_sources[0].metadata["page"] + 1
204
+ response_source2_page = response_sources[1].metadata["page"] + 1
205
+ # print ('chat response: ', response_answer)
206
+ # print('DB source', response_sources)
207
+
208
+ # Append user message and response to chat history
209
+ new_history = history + [(message, response_answer)]
210
+ # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
211
+ return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page
212
+
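+ # Note: indexing sources [0] and [1] assumes the retriever returns at least
+ # two documents; with the default retriever (k=4) this holds unless the
+ # corpus is very small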
+ 
+ def upload_file(file_obj):
+     list_file_path = []
+     for file in file_obj:
+         # Use each file's own path (file_obj.name would repeat the first file)
+         file_path = file.name
+         list_file_path.append(file_path)
+     # print(file_path)
+     # initialize_database(file_path, progress)
+     return list_file_path
+ 
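+ # Note: upload_file() is only used by the commented-out upload_btn handler
+ # below; gr.Files already exposes file paths via .name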
+ 
+ def demo():
+     with gr.Blocks(theme="base") as demo:
+         vector_db = gr.State()
+         qa_chain = gr.State()
+         collection_name = gr.State()
+ 
+         gr.Markdown(
+         """<center><h2>RAG LangChain open-source LLM chatbot</h2></center>
+         <h3>Ask any question about your PDF documents, along with follow-ups</h3>
+         <b>Note:</b> This AI assistant performs retrieval-augmented generation over your PDF documents. \
+         When generating answers, it takes past questions into account (via conversational memory) and includes document references for clarity.
+         <br><b>Warning:</b> This Space uses the free CPU Basic hardware from Hugging Face. Some steps and the LLM models below (free inference endpoints) can take some time to generate an output.<br>
+         """)
+         with gr.Tab("Step 1 - Document pre-processing"):
+             with gr.Row():
+                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+                 # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+             with gr.Row():
+                 # Renamed from db_btn to avoid shadowing the "Generate vector database" button below
+                 db_choice = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database")
+             with gr.Accordion("Advanced options - Document text splitter", open=False):
+                 with gr.Row():
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+                 with gr.Row():
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+             with gr.Row():
+                 db_progress = gr.Textbox(label="Vector database initialization", value="None")
+             with gr.Row():
+                 db_btn = gr.Button("Generate vector database...")
+ 
+         with gr.Tab("Step 2 - QA chain initialization"):
+             with gr.Row():
+                 llm_btn = gr.Radio(list_llm_simple,
+                     label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
+             with gr.Accordion("Advanced options - LLM model", open=False):
+                 with gr.Row():
+                     slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                 with gr.Row():
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                 with gr.Row():
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-k samples", info="Model top-k samples", interactive=True)
+             with gr.Row():
+                 llm_progress = gr.Textbox(value="None", label="QA chain initialization")
+             with gr.Row():
+                 qachain_btn = gr.Button("Initialize question-answering chain...")
+ 
+         with gr.Tab("Step 3 - Conversation with chatbot"):
+             chatbot = gr.Chatbot(height=300)
+             with gr.Accordion("Advanced - Document references", open=False):
+                 with gr.Row():
+                     doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
+                     source1_page = gr.Number(label="Page", scale=1)
+                 with gr.Row():
+                     doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
+                     source2_page = gr.Number(label="Page", scale=1)
+             with gr.Row():
+                 msg = gr.Textbox(placeholder="Type message", container=True)
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.ClearButton([msg, chatbot])
+ 
+         # Preprocessing events
+         # upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
+         db_btn.click(initialize_database,
+             inputs=[document, slider_chunk_size, slider_chunk_overlap],
+             outputs=[vector_db, collection_name, db_progress])
+         qachain_btn.click(initialize_LLM,
+             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+             outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
+             queue=False)
+ 
+         # Chatbot events
+         msg.submit(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
+             queue=False)
+         submit_btn.click(conversation,
+             inputs=[qa_chain, msg, chatbot],
+             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page],
+             queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0],
+             inputs=None,
+             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page],
+             queue=False)
+     demo.queue().launch(debug=True)
+ 
+ 
+ if __name__ == "__main__":
+     demo()
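To run locally: `python app.py` (this assumes the pinned requirements below are installed and, outside a Space, that a `HUGGINGFACEHUB_API_TOKEN` environment variable is set for the `HuggingFaceHub` inference endpoints).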
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ torch
+ transformers
+ sentence-transformers
+ langchain<0.1.2
+ tqdm
+ accelerate
+ pypdf
+ chromadb
+ 
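Note: `langchain<0.1.2` is likely pinned so that the `langchain.document_loaders`, `langchain.vectorstores`, and `langchain.llms` import paths used in app.py keep resolving; later releases move these into `langchain_community`.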