YchKhan committed on
Commit
2131a4c
1 Parent(s): af28c82

Create app.py

Files changed (1)
  1. app.py +347 -0
app.py ADDED
@@ -0,0 +1,347 @@
+ import langchain
+ from langchain.embeddings import SentenceTransformerEmbeddings
+ from langchain.document_loaders import UnstructuredPDFLoader, UnstructuredWordDocumentLoader
+ from langchain.indexes import VectorstoreIndexCreator
+ from langchain.vectorstores import FAISS
+ from zipfile import ZipFile
+ import gradio as gr
+ import openpyxl
+ import os
+ import shutil
+ from langchain.schema import Document
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ import tiktoken
+ import secrets
+ import time
+ import requests
+ import tempfile
+
+ from groq import Groq
+
+
+ tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
+
+ # create the length function
+ def tiktoken_len(text):
+     tokens = tokenizer.encode(
+         text,
+         disallowed_special=()
+     )
+     return len(tokens)
+
+ text_splitter = RecursiveCharacterTextSplitter(
+     chunk_size=800,
+     chunk_overlap=400,
+     length_function=tiktoken_len,
+     separators=["\n\n", "\n", " ", ""]
+ )
+
+ embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+ foo = Document(page_content='foo is fou!', metadata={"source": 'foo source'})
+
+
+ def reset_database(ui_session_id):
+     session_id = f"PDFAISS-{ui_session_id}"
+     if 'drive' in session_id:
+         print("RESET DATABASE: session_id contains 'drive' !!")
+         return None
+
+     try:
+         shutil.rmtree(session_id)
+     except OSError:
+         print(f"no {session_id} directory present")
+
+     try:
+         os.remove(f"{session_id}.zip")
+     except OSError:
+         print(f"no {session_id}.zip present")
+
+     return None
+
+ def is_duplicate(split_docs, db):
+     # Heuristic: if the first few chunks are near-exact matches of content already
+     # in the db, the summed similarity distances stay close to zero.
+     epsilon = 0.0
+     print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
+     for i in range(min(3, len(split_docs))):
+         query = split_docs[i].page_content
+         docs = db.similarity_search_with_score(query, k=1)
+         _, score = docs[0]
+         epsilon += score
+     print(f"DUPLICATE: epsilon: {epsilon}")
+     return epsilon < 0.1
+
+ def merge_split_docs_to_db(split_docs, db, progress, progress_step=0.1):
+     progress(progress_step, desc="merging docs")
+     if len(split_docs) == 0:
+         print("MERGE to db: NO docs!!")
+         return db
+
+     filename = split_docs[0].metadata['source']
+     if is_duplicate(split_docs, db):
+         print(f"MERGE: Document is duplicated: {filename}")
+         return db
+     print(f"MERGE: number of split docs: {len(split_docs)}")
+     batch = 10
+     for i in range(0, len(split_docs), batch):
+         progress(i / len(split_docs), desc=f"added {i} chunks of {len(split_docs)} chunks")
+         db1 = FAISS.from_documents(split_docs[i:i + batch], embeddings)
+         db.merge_from(db1)
+     return db
+
+ def merge_pdf_to_db(filename, db, progress, progress_step=0.1):
+     progress_step += 0.05
+     progress(progress_step, 'unpacking pdf')
+     doc = UnstructuredPDFLoader(filename).load()
+     doc[0].metadata['source'] = filename.split('/')[-1]
+     split_docs = text_splitter.split_documents(doc)
+     progress_step += 0.3
+     progress(progress_step, 'pdf unpacked')
+     return merge_split_docs_to_db(split_docs, db, progress, progress_step)
+
+ def merge_docx_to_db(filename, db, progress, progress_step=0.1):
+     progress_step += 0.05
+     progress(progress_step, 'unpacking docx')
+     doc = UnstructuredWordDocumentLoader(filename).load()
+     doc[0].metadata['source'] = filename.split('/')[-1]
+     split_docs = text_splitter.split_documents(doc)
+     progress_step += 0.3
+     progress(progress_step, 'docx unpacked')
+     return merge_split_docs_to_db(split_docs, db, progress, progress_step)
+
+ def merge_txt_to_db(filename, db, progress, progress_step=0.1):
+     progress_step += 0.05
+     progress(progress_step, 'unpacking txt')
+     with open(filename) as f:
+         docs = text_splitter.split_text(f.read())
+     split_docs = [Document(page_content=doc, metadata={'source': filename.split('/')[-1]}) for doc in docs]
+     progress_step += 0.3
+     progress(progress_step, 'txt unpacked')
+     return merge_split_docs_to_db(split_docs, db, progress, progress_step)
+
+ def unpack_zip_file(filename, db, progress):
+     with ZipFile(filename, 'r') as zipObj:
+         contents = zipObj.namelist()
+     print(f"unpack zip: contents: {contents}")
+     tmp_directory = filename.split('/')[-1].split('.')[-2]
+     shutil.unpack_archive(filename, tmp_directory)
+
+     # a zipped FAISS database: load it and merge it directly
+     if 'index.faiss' in [item.lower() for item in contents]:
+         db2 = FAISS.load_local(tmp_directory, embeddings, allow_dangerous_deserialization=True)
+         db.merge_from(db2)
+         return db
+
+     # otherwise, a zip of documents: merge them one by one
+     for file in contents:
+         if file.lower().endswith('.docx'):
+             db = merge_docx_to_db(f"{tmp_directory}/{file}", db, progress)
+         if file.lower().endswith('.pdf'):
+             db = merge_pdf_to_db(f"{tmp_directory}/{file}", db, progress)
+         if file.lower().endswith('.txt'):
+             db = merge_txt_to_db(f"{tmp_directory}/{file}", db, progress)
+     return db
+
+ def add_files_to_zip(session_id):
+     zip_file_name = f"{session_id}.zip"
+     with ZipFile(zip_file_name, "w") as zipObj:
+         for root, dirs, files in os.walk(session_id):
+             for file_name in files:
+                 file_path = os.path.join(root, file_name)
+                 arcname = os.path.relpath(file_path, session_id)
+                 zipObj.write(file_path, arcname)
+
+ #### UI Functions ####
+
+ def embed_files(files, ui_session_id, progress=gr.Progress(), progress_step=0.05):
+     if ui_session_id not in os.environ['users'].split(', '):
+         return "README.md", ui_session_id, ""
+     print(files)
+     progress(progress_step, desc="Starting...")
+     split_docs = []
+     if len(ui_session_id) == 0:
+         ui_session_id = secrets.token_urlsafe(16)
+     session_id = f"PDFAISS-{ui_session_id}"
+
+     try:
+         db = FAISS.load_local(session_id, embeddings, allow_dangerous_deserialization=True)
+     except Exception:
+         print(f"SESSION: {session_id} database does not exist, create a FAISS db")
+         db = FAISS.from_documents([foo], embeddings)
+         db.save_local(session_id)
+         print(f"SESSION: {session_id} database created")
+
+     print("EMBEDDED, before embedding: ", session_id, len(db.index_to_docstore_id))
+     for file_id, file in enumerate(files):
+         print("ID : ", file_id, "FILE : ", file)
+         file_type = file.name.split('.')[-1].lower()
+         source = file.name.split('/')[-1]
+         print(f"current file: {source}")
+         progress(file_id / len(files), desc=f"Treating {source}")
+
+         db2 = None
+         if file_type == 'pdf':
+             db2 = merge_pdf_to_db(file.name, db, progress)
+
+         if file_type == 'txt':
+             db2 = merge_txt_to_db(file.name, db, progress)
+
+         if file_type == 'docx':
+             db2 = merge_docx_to_db(file.name, db, progress)
+
+         if file_type == 'zip':
+             db2 = unpack_zip_file(file.name, db, progress)
+
+         if db2 is not None:
+             db = db2
+             db.save_local(session_id)
+             ### move file to store ###
+             progress(progress_step, desc='moving file to store')
+             directory_path = f"{session_id}/store/"
+             if not os.path.exists(directory_path):
+                 os.makedirs(directory_path)
+             try:
+                 shutil.move(file.name, directory_path)
+             except Exception:
+                 pass
+
+     ### load the updated db and zip it ###
+     progress(progress_step, desc='loading db')
+     db = FAISS.load_local(session_id, embeddings, allow_dangerous_deserialization=True)
+     print("EMBEDDED, after embedding: ", session_id, len(db.index_to_docstore_id))
+     progress(progress_step, desc='zipping db for download')
+     add_files_to_zip(session_id)
+     print("EMBEDDED: db zipped")
+     progress(progress_step, desc='db zipped')
+     return f"{session_id}.zip", ui_session_id, ""
+
+
+ def add_to_db(references, ui_session_id):
+     # Note: relies on a store_files() helper that is not defined in this file.
+     files = store_files(references)
+     return embed_files(files, ui_session_id)
+
+ def export_files(references):
+     files = store_files(references, ret_names=True)
+     #paths = [file.name for file in files]
+     return files
+
+
+ def display_docs(docs):
+     output_str = ''
+     for i, doc in enumerate(docs):
+         source = doc.metadata['source'].split('/')[-1]
+         output_str += f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n\n"
+     return output_str
+
+
+ def ask_llm(system, user_input):
+     messages = [
+         {
+             "role": "system",
+             "content": system
+         },
+         {
+             "role": "user",
+             "content": user_input,
+         }
+     ]
+     client = Groq(api_key=os.environ["GROQ_KEY"])
+     chat_completion = client.chat.completions.create(
+         messages=messages,
+         model='mixtral-8x7b-32768',
+     )
+     return chat_completion.choices[0].message.content
+
+ def ask_llm_stream(system, user_input):
+     llm_response = ""
+     client = Groq(api_key=os.environ["GROQ_KEY"])
+
+     messages = [
+         {
+             "role": "system",
+             "content": system
+         },
+         {
+             "role": "user",
+             "content": user_input,
+         }
+     ]
+
+     stream = client.chat.completions.create(
+         messages=messages,
+         model="mixtral-8x7b-32768",
+         temperature=0.5,
+         max_tokens=1024,
+         top_p=1,
+         stop=None,
+         stream=True,
+     )
+
+     for chunk in stream:
+         llm_response += chunk.choices[0].delta.content if chunk.choices[0].delta.content is not None else ""
+         yield llm_response
+
+
+ def ask_gpt(query, ui_session_id, history):
+     if ui_session_id not in os.environ['users'].split(', '):
+         yield "Please Login", "", ""
+         return
+     session_id = f"PDFAISS-{ui_session_id}"
+     try:
+         db = FAISS.load_local(session_id, embeddings, allow_dangerous_deserialization=True)
+         print("ASKGPT after loading", session_id, len(db.index_to_docstore_id))
+     except Exception:
+         print(f"SESSION: {session_id} database does not exist")
+         yield f"SESSION: {session_id} database does not exist", "", ""
+         return
+
+     docs = db.similarity_search(query)
+
+     documents = "\n\n*-*-*-*-*-*\n\n".join(f"Content: {doc.page_content}\n" for doc in docs)
+     system = f"# Instructions\nTake a deep breath and reason step by step.\nYou are a helpful assistant. You have only one mission: answer the user input based on the **provided documents**. If the answer to the user's question is not contained in the **provided documents**, say so but **don't make up an answer**. I chose you because you can say 'I don't know', so please don't define acronyms that are not present in the following **PROVIDED DOCUMENTS**; double check that they are present before answering. If some of the information can be useful for the user, you can mention it.\nFinish your response with **ONE** follow-up question that the provided documents could answer.\n\nThe documents are separated by the string '*-*-*-*-*-*'. Do not provide any explanations or details.\n\n# **Provided documents**: {documents}."
+     gen = ask_llm_stream(system, query)
+     last_value = ""
+     displayable_docs = display_docs(docs)
+     while True:
+         try:
+             last_value = next(gen)
+             yield last_value, displayable_docs, history + f"[query]\n{query}\n[answer]\n{last_value}\n[references]\n{displayable_docs}\n\n"
+         except StopIteration:
+             break
+     history += f"[query]\n{query}\n[answer]\n{last_value}\n[references]\n{displayable_docs}\n\n"
+     yield last_value, displayable_docs, history
+
+
+ def auth_user(ui_session_id):
+     authorized = ui_session_id in os.environ['users'].split(', ')
+     return (
+         gr.Textbox(label='Username', visible=not authorized),
+         gr.File(file_count="multiple", file_types=[".txt", ".pdf", ".zip", ".docx"], visible=authorized),
+         gr.Button("Reset AI Knowledge", visible=authorized),
+         gr.Markdown(label='AI Answer', visible=authorized),
+         gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=authorized),
+         gr.Button("▶", scale=1, visible=authorized),
+         gr.Textbox(label='Sources', show_copy_button=True, visible=authorized),
+         gr.File(label="Zipped database", visible=authorized),
+         gr.Textbox(label='History', show_copy_button=True, visible=authorized),
+     )
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Enrich an LLM's knowledge with your own documents 🧠🤖")
+
+     with gr.Column():
+         tb_session_id = gr.Textbox(label='Username')
+         docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf", ".zip", ".docx"], visible=False)
+         btn_reset_db = gr.Button("Reset AI Knowledge", visible=False)
+
+     with gr.Column():
+         answer_output = gr.Markdown(label='AI Answer', visible=False)
+         with gr.Row():
+             query_input = gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False)
+             btn_askGPT = gr.Button("▶", scale=1, visible=False)
+
+         tb_sources = gr.Textbox(label='Sources', show_copy_button=True, visible=False)
+
+     with gr.Accordion("Download your knowledge base and see your conversation history", open=False):
+         db_output = gr.File(label="Zipped database", visible=False)
+         tb_history = gr.Textbox(label='History', show_copy_button=True, visible=False, interactive=False)
+
+     tb_session_id.submit(auth_user, inputs=tb_session_id, outputs=[tb_session_id, docs_input, btn_reset_db, answer_output, query_input, btn_askGPT, tb_sources, db_output, tb_history])
+
+     docs_input.upload(embed_files, inputs=[docs_input, tb_session_id], outputs=[db_output, tb_session_id, query_input])
+     btn_reset_db.click(reset_database, inputs=[tb_session_id], outputs=[db_output])
+     btn_askGPT.click(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
+     query_input.submit(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
+
+
+ demo.launch(debug=False, share=False)
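
The app reads two environment variables at runtime: 'users', a comma-and-space separated whitelist of usernames parsed with .split(', '), and 'GROQ_KEY', the Groq API key used by ask_llm and ask_llm_stream. Below is a minimal local-run sketch; the values are hypothetical placeholders and are not part of this commit, and real secrets should be kept out of source control.

# run_local.py: minimal local-run sketch (hypothetical placeholder values).
import os

os.environ["users"] = "alice, bob"            # whitelist checked by auth_user, embed_files and ask_gpt
os.environ["GROQ_KEY"] = "your-groq-api-key"  # key read when building Groq(api_key=...)

import app  # importing app.py builds the Blocks UI and calls demo.launch()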