misbah1955 committed
Commit 3f82a19
1 Parent(s): 708abd6

Upload 4 files

chatfuncs/__init__.py ADDED
File without changes
chatfuncs/chatfuncs.py ADDED
@@ -0,0 +1,1032 @@
1
+ import re
2
+ import os
3
+ import datetime
4
+ from typing import TypeVar, Dict, List, Tuple
5
+ import time
6
+ from itertools import compress
7
+ import pandas as pd
8
+ import numpy as np
9
+
10
+ # Model packages
11
+ import torch.cuda
12
+ from threading import Thread
13
+ from transformers import pipeline, TextIteratorStreamer
14
+
15
+ # Alternative model sources
16
+ #from dataclasses import asdict, dataclass
17
+
18
+ # Langchain functions
19
+ from langchain.prompts import PromptTemplate
20
+ from langchain.vectorstores import FAISS
21
+ from langchain.retrievers import SVMRetriever
22
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
23
+ from langchain.docstore.document import Document
24
+
25
+ # For keyword extraction (not currently used)
26
+ #import nltk
27
+ #nltk.download('wordnet')
28
+ from nltk.corpus import stopwords
29
+ from nltk.tokenize import RegexpTokenizer
30
+ from nltk.stem import WordNetLemmatizer
31
+ from keybert import KeyBERT
32
+
33
+ # For Name Entity Recognition model
34
+ #from span_marker import SpanMarkerModel # Not currently used
35
+
36
+ # For BM25 retrieval
37
+ from gensim.corpora import Dictionary
38
+ from gensim.models import TfidfModel, OkapiBM25Model
39
+ from gensim.similarities import SparseMatrixSimilarity
40
+
41
+ import gradio as gr
42
+
43
+ torch.cuda.empty_cache()
44
+
45
+ PandasDataFrame = TypeVar('pd.core.frame.DataFrame')
46
+
47
+ embeddings = None # global variable setup
48
+ vectorstore = None # global variable setup
49
+ model_type = None # global variable setup
50
+
51
+ max_memory_length = 0 # How long should the memory of the conversation last?
52
+
53
+ full_text = "" # Define dummy source text (full text) just to enable highlight function to load
54
+
55
+ model = [] # Define empty list for model functions to run
56
+ tokenizer = [] # Define empty list for model functions to run
57
+
58
+ ## Highlight text constants
59
+ hlt_chunk_size = 12
60
+ hlt_strat = [" ", ". ", "! ", "? ", ": ", "\n\n", "\n", ", "]
61
+ hlt_overlap = 4
62
+
63
+ ## Initialise NER model ##
64
+ ner_model = []#SpanMarkerModel.from_pretrained("tomaarsen/span-marker-mbert-base-multinerd") # Not currently used
65
+
66
+ ## Initialise keyword model ##
67
+ # Used to pull out keywords from chat history to add to user queries behind the scenes
68
+ kw_model = pipeline("feature-extraction", model="sentence-transformers/all-MiniLM-L6-v2")
69
+
70
+ # gpu_layers is currently set to 0 even when CUDA is available, due to persistent bugs in the CUDA implementation
71
+ if torch.cuda.is_available():
72
+ torch_device = "cuda"
73
+ gpu_layers = 0
74
+ else:
75
+ torch_device = "cpu"
76
+ gpu_layers = 0
77
+
78
+ print("Running on device:", torch_device)
79
+ threads = 8 #torch.get_num_threads()
80
+ print("CPU threads:", threads)
81
+
82
+ # Flan Alpaca (small, fast) Model parameters
83
+ temperature: float = 0.1
84
+ top_k: int = 3
85
+ top_p: float = 1
86
+ repetition_penalty: float = 1.3
87
+ flan_alpaca_repetition_penalty: float = 1.3
88
+ last_n_tokens: int = 64
89
+ max_new_tokens: int = 256
90
+ seed: int = 42
91
+ reset: bool = False
92
+ stream: bool = True
93
+ threads: int = threads
94
+ batch_size:int = 256
95
+ context_length:int = 2048
96
+ sample = True
97
+
98
+
99
+ class CtransInitConfig_gpu:
100
+ def __init__(self, temperature=temperature,
101
+ top_k=top_k,
102
+ top_p=top_p,
103
+ repetition_penalty=repetition_penalty,
104
+ last_n_tokens=last_n_tokens,
105
+ max_new_tokens=max_new_tokens,
106
+ seed=seed,
107
+ reset=reset,
108
+ stream=stream,
109
+ threads=threads,
110
+ batch_size=batch_size,
111
+ context_length=context_length,
112
+ gpu_layers=gpu_layers):
113
+ self.temperature = temperature
114
+ self.top_k = top_k
115
+ self.top_p = top_p
116
+ self.repetition_penalty = repetition_penalty
117
+ self.last_n_tokens = last_n_tokens
118
+ self.max_new_tokens = max_new_tokens
119
+ self.seed = seed
120
+ self.reset = reset
121
+ self.stream = stream
122
+ self.threads = threads
123
+ self.batch_size = batch_size
124
+ self.context_length = context_length
125
+ self.gpu_layers = gpu_layers
126
+ # self.stop: list[str] = field(default_factory=lambda: [stop_string])
127
+
128
+ def update_gpu(self, new_value):
129
+ self.gpu_layers = new_value
130
+
131
+ class CtransInitConfig_cpu(CtransInitConfig_gpu):
132
+ def __init__(self):
133
+ super().__init__()
134
+ self.gpu_layers = 0
135
+
136
+ gpu_config = CtransInitConfig_gpu()
137
+ cpu_config = CtransInitConfig_cpu()
138
+
139
+
140
+ class CtransGenGenerationConfig:
141
+ def __init__(self, temperature=temperature,
142
+ top_k=top_k,
143
+ top_p=top_p,
144
+ repetition_penalty=repetition_penalty,
145
+ last_n_tokens=last_n_tokens,
146
+ seed=seed,
147
+ threads=threads,
148
+ batch_size=batch_size,
149
+ reset=True
150
+ ):
151
+ self.temperature = temperature
152
+ self.top_k = top_k
153
+ self.top_p = top_p
154
+ self.repetition_penalty = repetition_penalty
155
+ self.last_n_tokens = last_n_tokens
156
+ self.seed = seed
157
+ self.threads = threads
158
+ self.batch_size = batch_size
159
+ self.reset = reset
160
+
161
+ def update_temp(self, new_value):
162
+ self.temperature = new_value
163
+
164
+ # Vectorstore funcs
165
+
166
+ def docs_to_faiss_save(docs_out:PandasDataFrame, embeddings=embeddings):
167
+
168
+ print(f"> Total split documents: {len(docs_out)}")
169
+
170
+ vectorstore_func = FAISS.from_documents(documents=docs_out, embedding=embeddings)
171
+
172
+ '''
173
+ #with open("vectorstore.pkl", "wb") as f:
174
+ #pickle.dump(vectorstore, f)
175
+ '''
176
+
177
+ #if Path(save_to).exists():
178
+ # vectorstore_func.save_local(folder_path=save_to)
179
+ #else:
180
+ # os.mkdir(save_to)
181
+ # vectorstore_func.save_local(folder_path=save_to)
182
+
183
+ global vectorstore
184
+
185
+ vectorstore = vectorstore_func
186
+
187
+ out_message = "Document processing complete"
188
+
189
+ #print(out_message)
190
+ #print(f"> Saved to: {save_to}")
191
+
192
+ return out_message
193
+
194
+ # Prompt functions
195
+
196
+ def base_prompt_templates(model_type = "Flan Alpaca (small, fast)"):
197
+
198
+ #EXAMPLE_PROMPT = PromptTemplate(
199
+ # template="\nCONTENT:\n\n{page_content}\n\nSOURCE: {source}\n\n",
200
+ # input_variables=["page_content", "source"],
201
+ #)
202
+
203
+ CONTENT_PROMPT = PromptTemplate(
204
+ template="{page_content}\n\n",#\n\nSOURCE: {source}\n\n",
205
+ input_variables=["page_content"]
206
+ )
207
+
208
+ # The main prompt:
209
+
210
+ instruction_prompt_template_alpaca_quote = """### Instruction:
211
+ Quote directly from the SOURCE below that best answers the QUESTION. Only quote full sentences in the correct order. If you cannot find an answer, start your response with "My best guess is: ".
212
+
213
+ CONTENT: {summaries}
214
+ QUESTION: {question}
215
+
216
+ Response:"""
217
+
218
+ instruction_prompt_template_alpaca = """### Instruction:
219
+ ### User:
220
+ Answer the QUESTION using information from the following CONTENT.
221
+ CONTENT: {summaries}
222
+ QUESTION: {question}
223
+
224
+ Response:"""
225
+
226
+
227
+ instruction_prompt_template_wizard_orca = """### HUMAN:
228
+ Answer the QUESTION below based on the CONTENT. Only refer to CONTENT that directly answers the question.
229
+ CONTENT - {summaries}
230
+ QUESTION - {question}
231
+ ### RESPONSE:
232
+ """
233
+
234
+
235
+ instruction_prompt_template_orca = """
236
+ ### System:
237
+ You are an AI assistant that follows instruction extremely well. Help as much as you can.
238
+ ### User:
239
+ Answer the QUESTION with a short response using information from the following CONTENT.
240
+ QUESTION: {question}
241
+ CONTENT: {summaries}
242
+
243
+ ### Response:"""
244
+
245
+ instruction_prompt_template_orca_quote = """
246
+ ### System:
247
+ You are an AI assistant that follows instruction extremely well. Help as much as you can.
248
+ ### User:
249
+ Quote text from the CONTENT to answer the QUESTION below.
250
+ QUESTION: {question}
251
+ CONTENT: {summaries}
252
+ ### Response:
253
+ """
254
+
255
+
256
+ instruction_prompt_mistral_orca = """<|im_start|>system\n
257
+ You are an AI assistant that follows instruction extremely well. Help as much as you can.
258
+ <|im_start|>user\n
259
+ Answer the QUESTION using information from the following CONTENT. Respond with short answers that directly answer the question.
260
+ CONTENT: {summaries}
261
+ QUESTION: {question}\n
262
+ Answer:<|im_end|>"""
263
+
264
+ if model_type == "Flan Alpaca (small, fast)":
265
+ INSTRUCTION_PROMPT=PromptTemplate(template=instruction_prompt_template_alpaca, input_variables=['question', 'summaries'])
266
+ elif model_type == "Mistral Open Orca (larger, slow)":
267
+ INSTRUCTION_PROMPT=PromptTemplate(template=instruction_prompt_mistral_orca, input_variables=['question', 'summaries'])
268
+
269
+ return INSTRUCTION_PROMPT, CONTENT_PROMPT
270
+
271
+ def write_out_metadata_as_string(metadata_in):
272
+ metadata_string = [f"{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}" for d in metadata_in] # ['metadata']
273
+ return metadata_string
274
+
275
+ def generate_expanded_prompt(inputs: Dict[str, str], instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, out_passages = 2): # ,
276
+
277
+ question = inputs["question"]
278
+ chat_history = inputs["chat_history"]
279
+
280
+
281
+ new_question_kworded = adapt_q_from_chat_history(question, chat_history, extracted_memory) # new_question_keywords,
282
+
283
+
284
+ docs_keep_as_doc, doc_df, docs_keep_out = hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val = 25, out_passages = out_passages,
285
+ vec_score_cut_off = 0.85, vec_weight = 1, bm25_weight = 1, svm_weight = 1)#,
286
+ #vectorstore=globals()["vectorstore"], embeddings=globals()["embeddings"])
287
+
288
+ #print(docs_keep_as_doc)
289
+ #print(doc_df)
290
+ if (not docs_keep_as_doc) | (doc_df.empty):
291
+ sorry_prompt = """Say 'Sorry, there is no relevant information to answer this question.'.
292
+ RESPONSE:"""
293
+ return sorry_prompt, "No relevant sources found.", new_question_kworded
294
+
295
+ # Expand the found passages to the neighbouring context
296
+ file_type = determine_file_type(doc_df['meta_url'][0])
297
+
298
+ # Only expand passages if not tabular data
299
+ if (file_type != ".csv") & (file_type != ".xlsx"):
300
+ docs_keep_as_doc, doc_df = get_expanded_passages(vectorstore, docs_keep_out, width=3)
301
+
302
+
303
+
304
+ # Build up sources content to add to user display
305
+ doc_df['meta_clean'] = write_out_metadata_as_string(doc_df["metadata"]) # [f"<b>{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}</b>" for d in doc_df['metadata']]
306
+
307
+ # Remove meta text from the page content if it already exists there
308
+ doc_df['page_content_no_meta'] = doc_df.apply(lambda row: row['page_content'].replace(row['meta_clean'] + ". ", ""), axis=1)
309
+ doc_df['content_meta'] = doc_df['meta_clean'].astype(str) + ".<br><br>" + doc_df['page_content_no_meta'].astype(str)
310
+
311
+ #modified_page_content = [f" Document {i+1} - {word}" for i, word in enumerate(doc_df['page_content'])]
312
+ modified_page_content = [f" Document {i+1} - {word}" for i, word in enumerate(doc_df['content_meta'])]
313
+ docs_content_string = '<br><br>'.join(modified_page_content)
314
+
315
+ sources_docs_content_string = '<br><br>'.join(doc_df['content_meta'])#.replace(" "," ")#.strip()
316
+
317
+ instruction_prompt_out = instruction_prompt.format(question=new_question_kworded, summaries=docs_content_string)
318
+
319
+ print('Final prompt is: ')
320
+ print(instruction_prompt_out)
321
+
322
+ return instruction_prompt_out, sources_docs_content_string, new_question_kworded
323
+
324
+ def create_full_prompt(user_input, history, extracted_memory, vectorstore, embeddings, model_type, out_passages):
325
+
326
+ if not user_input.strip():
327
+ return history, "", "Respond with 'Please enter a question.' RESPONSE:"
328
+
329
+ #if chain_agent is None:
330
+ # history.append((user_input, "Please click the button to submit the Huggingface API key before using the chatbot (top right)"))
331
+ # return history, history, "", ""
332
+ print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
333
+ print("User input: " + user_input)
334
+
335
+ history = history or []
336
+
337
+ # Create instruction prompt
338
+ instruction_prompt, content_prompt = base_prompt_templates(model_type=model_type)
339
+ instruction_prompt_out, docs_content_string, new_question_kworded =\
340
+ generate_expanded_prompt({"question": user_input, "chat_history": history}, #vectorstore,
341
+ instruction_prompt, content_prompt, extracted_memory, vectorstore, embeddings, out_passages)
342
+
343
+
344
+ history.append(user_input)
345
+
346
+ print("Output history is:")
347
+ print(history)
348
+
349
+ print("Final prompt to model is:")
350
+ print(instruction_prompt_out)
351
+
352
+ return history, docs_content_string, instruction_prompt_out
353
+
354
+ # Chat functions
355
+ def produce_streaming_answer_chatbot(history, full_prompt, model_type,
356
+ temperature=temperature,
357
+ max_new_tokens=max_new_tokens,
358
+ sample=sample,
359
+ repetition_penalty=repetition_penalty,
360
+ top_p=top_p,
361
+ top_k=top_k
362
+ ):
363
+ #print("Model type is: ", model_type)
364
+
365
+ #if not full_prompt.strip():
366
+ # if history is None:
367
+ # history = []
368
+
369
+ # return history
370
+
371
+ if model_type == "Flan Alpaca (small, fast)":
372
+ # Get the model and tokenizer, and tokenize the user text.
373
+ model_inputs = tokenizer(text=full_prompt, return_tensors="pt", return_attention_mask=False).to(torch_device) # return_attention_mask=False was added
374
+
375
+ # Start generation on a separate thread, so that we don't block the UI. The text is pulled from the streamer
376
+ # in the main thread. Adds timeout to the streamer to handle exceptions in the generation thread.
377
+ streamer = TextIteratorStreamer(tokenizer, timeout=120., skip_prompt=True, skip_special_tokens=True)
378
+ generate_kwargs = dict(
379
+ model_inputs,
380
+ streamer=streamer,
381
+ max_new_tokens=max_new_tokens,
382
+ do_sample=sample,
383
+ repetition_penalty=repetition_penalty,
384
+ top_p=top_p,
385
+ temperature=temperature,
386
+ top_k=top_k
387
+ )
388
+
389
+ print(generate_kwargs)
390
+
391
+ t = Thread(target=model.generate, kwargs=generate_kwargs)
392
+ t.start()
393
+
394
+ # Pull the generated text from the streamer, and update the model output.
395
+ start = time.time()
396
+ NUM_TOKENS=0
397
+ print('-'*4+'Start Generation'+'-'*4)
398
+
399
+ history[-1][1] = ""
400
+ for new_text in streamer:
401
+ if new_text is None: new_text = ""
402
+ history[-1][1] += new_text
403
+ NUM_TOKENS+=1
404
+ yield history
405
+
406
+ time_generate = time.time() - start
407
+ print('\n')
408
+ print('-'*4+'End Generation'+'-'*4)
409
+ print(f'Num of generated tokens: {NUM_TOKENS}')
410
+ print(f'Time for complete generation: {time_generate}s')
411
+ print(f'Tokens per second: {NUM_TOKENS/time_generate}')
412
+ print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
413
+
414
+ elif model_type == "Mistral Open Orca (larger, slow)":
415
+ tokens = model.tokenize(full_prompt)
416
+
417
+ gen_config = CtransGenGenerationConfig()
418
+ gen_config.update_temp(temperature)
419
+
420
+ print(vars(gen_config))
421
+
422
+ # Pull the generated text from the streamer, and update the model output.
423
+ start = time.time()
424
+ NUM_TOKENS=0
425
+ print('-'*4+'Start Generation'+'-'*4)
426
+
427
+ history[-1][1] = ""
428
+ for new_text in model.generate(tokens, **vars(gen_config)): #CtransGen_generate(prompt=full_prompt)#, config=CtransGenGenerationConfig()): # #top_k=top_k, temperature=temperature, repetition_penalty=repetition_penalty,
429
+ if new_text is None: new_text = ""
430
+ history[-1][1] += model.detokenize(new_text) #new_text
431
+ NUM_TOKENS+=1
432
+ yield history
433
+
434
+ time_generate = time.time() - start
435
+ print('\n')
436
+ print('-'*4+'End Generation'+'-'*4)
437
+ print(f'Num of generated tokens: {NUM_TOKENS}')
438
+ print(f'Time for complete generation: {time_generate}s')
439
+ print(f'Tokens per second: {NUM_TOKENS/time_generate}')
440
+ print(f'Time per token: {(time_generate/NUM_TOKENS)*1000}ms')
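
The Flan Alpaca branch above follows the standard transformers streaming pattern: generation runs on a background thread while tokens are read from a TextIteratorStreamer in the main thread. A condensed, self-contained sketch of that pattern (using a small public model as a stand-in, not the app's configured model):

from threading import Thread
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

inputs = tokenizer("Answer briefly: what is a vector store?", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a thread; the streamer yields text as it is produced
thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=64))
thread.start()
for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()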
441
+
442
+ # Chat helper functions
443
+
444
+ def adapt_q_from_chat_history(question, chat_history, extracted_memory, keyword_model=""):#keyword_model): # new_question_keywords,
445
+
446
+ chat_history_str, chat_history_first_q, chat_history_first_ans, max_memory_length = _get_chat_history(chat_history)
447
+
448
+ if chat_history_str:
449
+ # Keyword extraction is now done in the add_inputs_to_history function
450
+ #remove_q_stopwords(str(chat_history_first_q) + " " + str(chat_history_first_ans))
451
+
452
+
453
+ new_question_kworded = str(extracted_memory) + ". " + question #+ " " + new_question_keywords
454
+ #extracted_memory + " " + question
455
+
456
+ else:
457
+ new_question_kworded = question #new_question_keywords
458
+
459
+ #print("Question output is: " + new_question_kworded)
460
+
461
+ return new_question_kworded
462
+
463
+ def determine_file_type(file_path):
464
+ """
465
+ Determine the file type based on its extension.
466
+
467
+ Parameters:
468
+ file_path (str): Path to the file.
469
+
470
+ Returns:
471
+ str: File extension (e.g., '.pdf', '.docx', '.txt', '.html').
472
+ """
473
+ return os.path.splitext(file_path)[1].lower()
474
+
475
+
476
+ def create_doc_df(docs_keep_out):
477
+ # Extract content and metadata from 'winning' passages.
478
+ content=[]
479
+ meta=[]
480
+ meta_url=[]
481
+ page_section=[]
482
+ score=[]
483
+
484
+ doc_df = pd.DataFrame()
485
+
486
+
487
+
488
+ for item in docs_keep_out:
489
+ content.append(item[0].page_content)
490
+ meta.append(item[0].metadata)
491
+ meta_url.append(item[0].metadata['source'])
492
+
493
+ file_extension = determine_file_type(item[0].metadata['source'])
494
+ if (file_extension != ".csv") & (file_extension != ".xlsx"):
495
+ page_section.append(item[0].metadata['page_section'])
496
+ else: page_section.append("")
497
+ score.append(item[1])
498
+
499
+ # Create df from 'winning' passages
500
+
501
+ doc_df = pd.DataFrame(list(zip(content, meta, page_section, meta_url, score)),
502
+ columns =['page_content', 'metadata', 'page_section', 'meta_url', 'score'])
503
+
504
+ docs_content = doc_df['page_content'].astype(str)
505
+ doc_df['full_url'] = "https://" + doc_df['meta_url']
506
+
507
+ return doc_df
508
+
509
+ def hybrid_retrieval(new_question_kworded, vectorstore, embeddings, k_val, out_passages,
510
+ vec_score_cut_off, vec_weight, bm25_weight, svm_weight): # ,vectorstore, embeddings
511
+
512
+ #vectorstore=globals()["vectorstore"]
513
+ #embeddings=globals()["embeddings"]
514
+ doc_df = pd.DataFrame()
515
+
516
+
517
+ docs = vectorstore.similarity_search_with_score(new_question_kworded, k=k_val)
518
+
519
+ print("Docs from similarity search:")
520
+ print(docs)
521
+
522
+ # Keep only documents with a certain score
523
+ docs_len = [len(x[0].page_content) for x in docs]
524
+ docs_scores = [x[1] for x in docs]
525
+
526
+ # Only keep sources that are sufficiently relevant (i.e. similarity search score below threshold below)
527
+ score_more_limit = pd.Series(docs_scores) < vec_score_cut_off
528
+ docs_keep = list(compress(docs, score_more_limit))
529
+
530
+ if not docs_keep:
531
+ return [], pd.DataFrame(), []
532
+
533
+ # Only keep sources that are at least 100 characters long
534
+ length_more_limit = pd.Series(docs_len) >= 100
535
+ docs_keep = list(compress(docs_keep, length_more_limit))
536
+
537
+ if not docs_keep:
538
+ return [], pd.DataFrame(), []
539
+
540
+ docs_keep_as_doc = [x[0] for x in docs_keep]
541
+ docs_keep_length = len(docs_keep_as_doc)
542
+
543
+
544
+
545
+ if docs_keep_length == 1:
546
+
547
+ content=[]
548
+ meta_url=[]
549
+ score=[]
550
+
551
+ for item in docs_keep:
552
+ content.append(item[0].page_content)
553
+ meta_url.append(item[0].metadata['source'])
554
+ score.append(item[1])
555
+
556
+ # Create df from 'winning' passages
557
+
558
+ doc_df = pd.DataFrame(list(zip(content, meta_url, score)),
559
+ columns =['page_content', 'meta_url', 'score'])
560
+
561
+ docs_content = doc_df['page_content'].astype(str)
562
+ docs_url = doc_df['meta_url']
563
+
564
+ return docs_keep_as_doc, docs_content, docs_url
565
+
566
+ # Check for if more docs are removed than the desired output
567
+ if out_passages > docs_keep_length:
568
+ out_passages = docs_keep_length
569
+ k_val = docs_keep_length
570
+
571
+ vec_rank = [*range(1, docs_keep_length+1)]
572
+ vec_score = [(docs_keep_length/x)*vec_weight for x in vec_rank]
573
+
574
+ # 2nd level check on retrieved docs with BM25
575
+
576
+ content_keep=[]
577
+ for item in docs_keep:
578
+ content_keep.append(item[0].page_content)
579
+
580
+ corpus = [doc.lower().split() for doc in content_keep]
581
+ dictionary = Dictionary(corpus)
582
+ bm25_model = OkapiBM25Model(dictionary=dictionary)
583
+ bm25_corpus = bm25_model[list(map(dictionary.doc2bow, corpus))]
584
+ bm25_index = SparseMatrixSimilarity(bm25_corpus, num_docs=len(corpus), num_terms=len(dictionary),
585
+ normalize_queries=False, normalize_documents=False)
586
+ query = new_question_kworded.lower().split()
587
+ tfidf_model = TfidfModel(dictionary=dictionary, smartirs='bnn') # Enforce binary weighting of queries
588
+ tfidf_query = tfidf_model[dictionary.doc2bow(query)]
589
+ similarities = np.array(bm25_index[tfidf_query])
590
+ #print(similarities)
591
+ temp = similarities.argsort()
592
+ ranks = np.arange(len(similarities))[temp.argsort()][::-1]
593
+
594
+ # Pair each index with its corresponding value
595
+ pairs = list(zip(ranks, docs_keep_as_doc))
596
+ # Sort the pairs by the indices
597
+ pairs.sort()
598
+ # Extract the values in the new order
599
+ bm25_result = [value for ranks, value in pairs]
600
+
601
+ bm25_rank=[]
602
+ bm25_score = []
603
+
604
+ for vec_item in docs_keep:
605
+ x = 0
606
+ for bm25_item in bm25_result:
607
+ x = x + 1
608
+ if bm25_item.page_content == vec_item[0].page_content:
609
+ bm25_rank.append(x)
610
+ bm25_score.append((docs_keep_length/x)*bm25_weight)
611
+
612
+ # 3rd level check on retrieved docs with SVM retriever
613
+ svm_retriever = SVMRetriever.from_texts(content_keep, embeddings, k = k_val)
614
+ svm_result = svm_retriever.get_relevant_documents(new_question_kworded)
615
+
616
+
617
+ svm_rank=[]
618
+ svm_score = []
619
+
620
+ for vec_item in docs_keep:
621
+ x = 0
622
+ for svm_item in svm_result:
623
+ x = x + 1
624
+ if svm_item.page_content == vec_item[0].page_content:
625
+ svm_rank.append(x)
626
+ svm_score.append((docs_keep_length/x)*svm_weight)
627
+
628
+
629
+ ## Calculate final score based on three ranking methods
630
+ final_score = [a + b + c for a, b, c in zip(vec_score, bm25_score, svm_score)]
631
+ final_rank = [sorted(final_score, reverse=True).index(x)+1 for x in final_score]
632
+ # Force final_rank to increment by 1 each time
633
+ final_rank = list(pd.Series(final_rank).rank(method='first'))
634
+
635
+ #print("final rank: " + str(final_rank))
636
+ #print("out_passages: " + str(out_passages))
637
+
638
+ best_rank_index_pos = []
639
+
640
+ for x in range(1,out_passages+1):
641
+ try:
642
+ best_rank_index_pos.append(final_rank.index(x))
643
+ except ValueError: # the rank may be missing if fewer passages were kept than requested
644
+ pass
645
+
646
+ # Convert the best rank index positions to a pandas Series
647
+
648
+ best_rank_pos_series = pd.Series(best_rank_index_pos)
649
+
650
+
651
+ docs_keep_out = [docs_keep[i] for i in best_rank_index_pos]
652
+
653
+ # Keep only 'best' options
654
+ docs_keep_as_doc = [x[0] for x in docs_keep_out]
655
+
656
+ # Make df of best options
657
+ doc_df = create_doc_df(docs_keep_out)
658
+
659
+ return docs_keep_as_doc, doc_df, docs_keep_out
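
hybrid_retrieval above fuses three rankings (vector similarity, BM25 and SVM) by giving each passage a score of (number_of_passages / rank) per method and summing. A minimal sketch of that fusion step with hypothetical ranks, independent of the retrievers themselves:

import pandas as pd

# Hypothetical ranks for four passages from three retrievers (1 = best)
vec_rank = [1, 2, 3, 4]
bm25_rank = [2, 1, 4, 3]
svm_rank = [1, 3, 2, 4]

n = len(vec_rank)
# Same weighting scheme as above, with all weights set to 1
final_score = [n / v + n / b + n / s for v, b, s in zip(vec_rank, bm25_rank, svm_rank)]

# Higher combined score means a better final rank; ties broken by order of appearance
final_rank = list(pd.Series([-s for s in final_score]).rank(method="first"))
print(final_rank)  # [1.0, 2.0, 3.0, 4.0] for these inputs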
660
+
661
+ def get_expanded_passages(vectorstore, docs, width):
662
+
663
+ """
664
+ Extracts expanded passages based on given documents and a width for context.
665
+
666
+ Parameters:
667
+ - vectorstore: The primary data source.
668
+ - docs: List of documents to be expanded.
669
+ - width: Number of documents to expand around a given document for context.
670
+
671
+ Returns:
672
+ - expanded_docs: List of expanded Document objects.
673
+ - doc_df: DataFrame representation of expanded_docs.
674
+ """
675
+
676
+ from collections import defaultdict
677
+
678
+ def get_docs_from_vstore(vectorstore):
679
+ vector = vectorstore.docstore._dict
680
+ return list(vector.items())
681
+
682
+ def extract_details(docs_list):
683
+ docs_list_out = [tup[1] for tup in docs_list]
684
+ content = [doc.page_content for doc in docs_list_out]
685
+ meta = [doc.metadata for doc in docs_list_out]
686
+ return ''.join(content), meta[0], meta[-1]
687
+
688
+ def get_parent_content_and_meta(vstore_docs, width, target):
689
+ #target_range = range(max(0, target - width), min(len(vstore_docs), target + width + 1))
690
+ target_range = range(max(0, target), min(len(vstore_docs), target + width + 1)) # Now only selects extra passages AFTER the found passage
691
+ parent_vstore_out = [vstore_docs[i] for i in target_range]
692
+
693
+ content_str_out, meta_first_out, meta_last_out = [], [], []
694
+ for _ in parent_vstore_out:
695
+ content_str, meta_first, meta_last = extract_details(parent_vstore_out)
696
+ content_str_out.append(content_str)
697
+ meta_first_out.append(meta_first)
698
+ meta_last_out.append(meta_last)
699
+ return content_str_out, meta_first_out, meta_last_out
700
+
701
+ def merge_dicts_except_source(d1, d2):
702
+ merged = {}
703
+ for key in d1:
704
+ if key != "source":
705
+ merged[key] = str(d1[key]) + " to " + str(d2[key])
706
+ else:
707
+ merged[key] = d1[key] # or d2[key], based on preference
708
+ return merged
709
+
710
+ def merge_two_lists_of_dicts(list1, list2):
711
+ return [merge_dicts_except_source(d1, d2) for d1, d2 in zip(list1, list2)]
712
+
713
+ # Step 1: Filter vstore_docs
714
+ vstore_docs = get_docs_from_vstore(vectorstore)
715
+ doc_sources = {doc.metadata['source'] for doc, _ in docs}
716
+ vstore_docs = [(k, v) for k, v in vstore_docs if v.metadata.get('source') in doc_sources]
717
+
718
+ # Step 2: Group by source and proceed
719
+ vstore_by_source = defaultdict(list)
720
+ for k, v in vstore_docs:
721
+ vstore_by_source[v.metadata['source']].append((k, v))
722
+
723
+ expanded_docs = []
724
+ for doc, score in docs:
725
+ search_source = doc.metadata['source']
726
+
727
+
728
+ #if file_type == ".csv" | file_type == ".xlsx":
729
+ # content_str, meta_first, meta_last = get_parent_content_and_meta(vstore_by_source[search_source], 0, search_index)
730
+
731
+ #else:
732
+ search_section = doc.metadata['page_section']
733
+ parent_vstore_meta_section = [doc.metadata['page_section'] for _, doc in vstore_by_source[search_source]]
734
+ search_index = parent_vstore_meta_section.index(search_section) if search_section in parent_vstore_meta_section else -1
735
+
736
+ content_str, meta_first, meta_last = get_parent_content_and_meta(vstore_by_source[search_source], width, search_index)
737
+ meta_full = merge_two_lists_of_dicts(meta_first, meta_last)
738
+
739
+ expanded_doc = (Document(page_content=content_str[0], metadata=meta_full[0]), score)
740
+ expanded_docs.append(expanded_doc)
741
+
742
+ doc_df = pd.DataFrame()
743
+
744
+ doc_df = create_doc_df(expanded_docs) # Assuming you've defined the 'create_doc_df' function elsewhere
745
+
746
+ return expanded_docs, doc_df
747
+
748
+ def highlight_found_text(search_text: str, full_text: str, hlt_chunk_size:int=hlt_chunk_size, hlt_strat:List=hlt_strat, hlt_overlap:int=hlt_overlap) -> str:
749
+ """
750
+ Highlights occurrences of search_text within full_text.
751
+
752
+ Parameters:
753
+ - search_text (str): The text to be searched for within full_text.
754
+ - full_text (str): The text within which search_text occurrences will be highlighted.
755
+
756
+ Returns:
757
+ - str: A string with occurrences of search_text highlighted.
758
+
759
+ Example:
760
+ >>> highlight_found_text("world", "Hello, world! This is a test. Another world awaits.")
761
+ 'Hello, <mark style="color:black;">world</mark>! This is a test. Another <mark style="color:black;">world</mark> awaits.'
762
+ """
763
+
764
+ def extract_text_from_input(text, i=0):
765
+ if isinstance(text, str):
766
+ return text.replace(" ", " ").strip()
767
+ elif isinstance(text, list):
768
+ return text[i][0].replace(" ", " ").strip()
769
+ else:
770
+ return ""
771
+
772
+ def extract_search_text_from_input(text):
773
+ if isinstance(text, str):
774
+ return text.replace(" ", " ").strip()
775
+ elif isinstance(text, list):
776
+ return text[-1][1].replace(" ", " ").strip()
777
+ else:
778
+ return ""
779
+
780
+ full_text = extract_text_from_input(full_text)
781
+ search_text = extract_search_text_from_input(search_text)
782
+
783
+
784
+
785
+ text_splitter = RecursiveCharacterTextSplitter(
786
+ chunk_size=hlt_chunk_size,
787
+ separators=hlt_strat,
788
+ chunk_overlap=hlt_overlap,
789
+ )
790
+ sections = text_splitter.split_text(search_text)
791
+
792
+ found_positions = {}
793
+ for x in sections:
794
+ text_start_pos = 0
795
+ while text_start_pos != -1:
796
+ text_start_pos = full_text.find(x, text_start_pos)
797
+ if text_start_pos != -1:
798
+ found_positions[text_start_pos] = text_start_pos + len(x)
799
+ text_start_pos += 1
800
+
801
+ # Combine overlapping or adjacent positions
802
+ sorted_starts = sorted(found_positions.keys())
803
+ combined_positions = []
804
+ if sorted_starts:
805
+ current_start, current_end = sorted_starts[0], found_positions[sorted_starts[0]]
806
+ for start in sorted_starts[1:]:
807
+ if start <= (current_end + 10):
808
+ current_end = max(current_end, found_positions[start])
809
+ else:
810
+ combined_positions.append((current_start, current_end))
811
+ current_start, current_end = start, found_positions[start]
812
+ combined_positions.append((current_start, current_end))
813
+
814
+ # Construct pos_tokens
815
+ pos_tokens = []
816
+ prev_end = 0
817
+ for start, end in combined_positions:
818
+ if end-start > 15: # Only highlight if there is a significant amount of matched text, to avoid picking up single short words like 'and'
819
+ pos_tokens.append(full_text[prev_end:start])
820
+ pos_tokens.append('<mark style="color:black;">' + full_text[start:end] + '</mark>')
821
+ prev_end = end
822
+ pos_tokens.append(full_text[prev_end:])
823
+
824
+ return "".join(pos_tokens)
825
+
826
+
827
+ # # Chat history functions
828
+
829
+ def clear_chat(chat_history_state, sources, chat_message, current_topic):
830
+ chat_history_state = []
831
+ sources = ''
832
+ chat_message = ''
833
+ current_topic = ''
834
+
835
+ return chat_history_state, sources, chat_message, current_topic
836
+
837
+ def _get_chat_history(chat_history: List[Tuple[str, str]], max_memory_length:int = max_memory_length): # Limit to last x interactions only
838
+
839
+ if (not chat_history) | (max_memory_length == 0):
840
+ chat_history = []
841
+
842
+ if len(chat_history) > max_memory_length:
843
+ chat_history = chat_history[-max_memory_length:]
844
+
845
+ #print(chat_history)
846
+
847
+ first_q = ""
848
+ first_ans = ""
849
+ for human_s, ai_s in chat_history:
850
+ first_q = human_s
851
+ first_ans = ai_s
852
+
853
+ #print("Text to keyword extract: " + first_q + " " + first_ans)
854
+ break
855
+
856
+ conversation = ""
857
+ for human_s, ai_s in chat_history:
858
+ human = f"Human: " + human_s
859
+ ai = f"Assistant: " + ai_s
860
+ conversation += "\n" + "\n".join([human, ai])
861
+
862
+ return conversation, first_q, first_ans, max_memory_length
863
+
864
+ def add_inputs_answer_to_history(user_message, history, current_topic):
865
+
866
+ if history is None:
867
+ history = [("","")]
868
+
869
+ #history.append((user_message, [-1]))
870
+
871
+ chat_history_str, chat_history_first_q, chat_history_first_ans, max_memory_length = _get_chat_history(history)
872
+
873
+
874
+ # Only get the keywords for the first question and response, or do it every time if over 'max_memory_length' responses in the conversation
875
+ if (len(history) == 1) | (len(history) > max_memory_length):
876
+
877
+ #print("History after appending is:")
878
+ #print(history)
879
+
880
+ first_q_and_first_ans = str(chat_history_first_q) + " " + str(chat_history_first_ans)
881
+ #ner_memory = remove_q_ner_extractor(first_q_and_first_ans)
882
+ keywords = keybert_keywords(first_q_and_first_ans, n = 8, kw_model=kw_model)
883
+ #keywords.append(ner_memory)
884
+
885
+ # Remove duplicate words while preserving order
886
+ ordered_tokens = set()
887
+ result = []
888
+ for word in keywords:
889
+ if word not in ordered_tokens:
890
+ ordered_tokens.add(word)
891
+ result.append(word)
892
+
893
+ extracted_memory = ' '.join(result)
894
+
895
+ else: extracted_memory=current_topic
896
+
897
+ print("Extracted memory is:")
898
+ print(extracted_memory)
899
+
900
+
901
+ return history, extracted_memory
902
+
903
+ # Keyword functions
904
+
905
+ def remove_q_stopwords(question): # Remove stopwords from question. Not used at the moment
906
+ # Prepare keywords from question by removing stopwords
907
+ text = question.lower()
908
+
909
+ # Remove numbers
910
+ text = re.sub('[0-9]', '', text)
911
+
912
+ tokenizer = RegexpTokenizer(r'\w+')
913
+ text_tokens = tokenizer.tokenize(text)
914
+ #text_tokens = word_tokenize(text)
915
+ tokens_without_sw = [word for word in text_tokens if not word in stopwords]
916
+
917
+ # Remove duplicate words while preserving order
918
+ ordered_tokens = set()
919
+ result = []
920
+ for word in tokens_without_sw:
921
+ if word not in ordered_tokens:
922
+ ordered_tokens.add(word)
923
+ result.append(word)
924
+
925
+
926
+
927
+ new_question_keywords = ' '.join(result)
928
+ return new_question_keywords
929
+
930
+ def remove_q_ner_extractor(question):
931
+
932
+ predict_out = ner_model.predict(question)
933
+
934
+
935
+
936
+ predict_tokens = [' '.join(v for k, v in d.items() if k == 'span') for d in predict_out]
937
+
938
+ # Remove duplicate words while preserving order
939
+ ordered_tokens = set()
940
+ result = []
941
+ for word in predict_tokens:
942
+ if word not in ordered_tokens:
943
+ ordered_tokens.add(word)
944
+ result.append(word)
945
+
946
+
947
+
948
+ new_question_keywords = ' '.join(result).lower()
949
+ return new_question_keywords
950
+
951
+ def apply_lemmatize(text, wnl=WordNetLemmatizer()):
952
+
953
+ def prep_for_lemma(text):
954
+
955
+ # Remove numbers
956
+ text = re.sub('[0-9]', '', text)
957
+ print(text)
958
+
959
+ tokenizer = RegexpTokenizer(r'\w+')
960
+ text_tokens = tokenizer.tokenize(text)
961
+ #text_tokens = word_tokenize(text)
962
+
963
+ return text_tokens
964
+
965
+ tokens = prep_for_lemma(text)
966
+
967
+ def lem_word(word):
968
+
969
+ if len(word) > 3: out_word = wnl.lemmatize(word)
970
+ else: out_word = word
971
+
972
+ return out_word
973
+
974
+ return [lem_word(token) for token in tokens]
975
+
976
+ def keybert_keywords(text, n, kw_model):
977
+ tokens_lemma = apply_lemmatize(text)
978
+ lemmatised_text = ' '.join(tokens_lemma)
979
+
980
+ keywords_text = KeyBERT(model=kw_model).extract_keywords(lemmatised_text, stop_words='english', top_n=n,
981
+ keyphrase_ngram_range=(1, 1))
982
+ keywords_list = [item[0] for item in keywords_text]
983
+
984
+ return keywords_list
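
A quick usage sketch of the keyword extraction done in keybert_keywords above (assumes keybert and sentence-transformers are installed; the model name mirrors the kw_model pipeline defined near the top of this file):

from keybert import KeyBERT

kw = KeyBERT(model="sentence-transformers/all-MiniLM-L6-v2")
keywords = kw.extract_keywords(
    "What does the borough plan say about housing and affordable rents?",
    stop_words="english", top_n=8, keyphrase_ngram_range=(1, 1),
)
print([word for word, score in keywords])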
985
+
986
+ # Gradio functions
987
+ def turn_off_interactivity(user_message, history):
988
+ return gr.update(value="", interactive=False), history + [[user_message, None]]
989
+
990
+ def restore_interactivity():
991
+ return gr.update(interactive=True)
992
+
993
+ def update_message(dropdown_value):
994
+ return gr.Textbox.update(value=dropdown_value)
995
+
996
+ def hide_block():
997
+ return gr.Radio.update(visible=False)
998
+
999
+ # Vote function
1000
+
1001
+ def vote(data: gr.LikeData, chat_history, instruction_prompt_out, model_type):
1002
+ import os
1003
+ import pandas as pd
1004
+
1005
+ chat_history_last = str(str(chat_history[-1][0]) + " - " + str(chat_history[-1][1]))
1006
+
1007
+ response_df = pd.DataFrame(data={"thumbs_up":data.liked,
1008
+ "chosen_response":data.value,
1009
+ "input_prompt":instruction_prompt_out,
1010
+ "chat_history":chat_history_last,
1011
+ "model_type": model_type,
1012
+ "date_time": pd.Timestamp.now()}, index=[0])
1013
+
1014
+ if data.liked:
1015
+ print("You upvoted this response: " + data.value)
1016
+
1017
+ if os.path.isfile("thumbs_up_data.csv"):
1018
+ existing_thumbs_up_df = pd.read_csv("thumbs_up_data.csv")
1019
+ thumbs_up_df_concat = pd.concat([existing_thumbs_up_df, response_df], ignore_index=True).drop("Unnamed: 0",axis=1, errors="ignore")
1020
+ thumbs_up_df_concat.to_csv("thumbs_up_data.csv")
1021
+ else:
1022
+ response_df.to_csv("thumbs_up_data.csv")
1023
+
1024
+ else:
1025
+ print("You downvoted this response: " + data.value)
1026
+
1027
+ if os.path.isfile("thumbs_down_data.csv"):
1028
+ existing_thumbs_down_df = pd.read_csv("thumbs_down_data.csv")
1029
+ thumbs_down_df_concat = pd.concat([existing_thumbs_down_df, response_df], ignore_index=True).drop("Unnamed: 0",axis=1, errors="ignore")
1030
+ thumbs_down_df_concat.to_csv("thumbs_down_data.csv")
1031
+ else:
1032
+ response_df.to_csv("thumbs_down_data.csv")
chatfuncs/ingest.py ADDED
@@ -0,0 +1,655 @@
1
+ # ---
2
+ # jupyter:
3
+ # jupytext:
4
+ # formats: ipynb,py:light
5
+ # text_representation:
6
+ # extension: .py
7
+ # format_name: light
8
+ # format_version: '1.5'
9
+ # jupytext_version: 1.14.6
10
+ # kernelspec:
11
+ # display_name: Python 3 (ipykernel)
12
+ # language: python
13
+ # name: python3
14
+ # ---
15
+
16
+ # # Ingest website to FAISS
17
+
18
+ # ## Install/ import stuff we need
19
+
20
+ import os
21
+ from pathlib import Path
22
+ import re
23
+ import requests
24
+ import pandas as pd
25
+ import dateutil.parser
26
+ from typing import TypeVar, List
27
+
28
+ from langchain.embeddings import HuggingFaceEmbeddings # HuggingFaceInstructEmbeddings,
29
+ from langchain.vectorstores.faiss import FAISS
30
+ from langchain.vectorstores import Chroma
31
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
32
+ from langchain.docstore.document import Document
33
+
34
+ from bs4 import BeautifulSoup
35
+ from docx import Document as Doc
36
+ from pypdf import PdfReader
37
+
38
+ PandasDataFrame = TypeVar('pd.core.frame.DataFrame')
39
+ # -
40
+
41
+ split_strat = ["\n\n", "\n", ". ", "! ", "? "]
42
+ chunk_size = 500
43
+ chunk_overlap = 0
44
+ start_index = True
45
+
46
+ ## Parse files
47
+ def determine_file_type(file_path):
48
+ """
49
+ Determine the file type based on its extension.
50
+
51
+ Parameters:
52
+ file_path (str): Path to the file.
53
+
54
+ Returns:
55
+ str: File extension (e.g., '.pdf', '.docx', '.txt', '.html').
56
+ """
57
+ return os.path.splitext(file_path)[1].lower()
58
+
59
+ def parse_file(file_paths, text_column='text'):
60
+ """
61
+ Accepts a list of file paths, determines each file's type based on its extension,
62
+ and passes it to the relevant parsing function.
63
+
64
+ Parameters:
65
+ file_paths (list): List of file paths.
66
+ text_column (str): Name of the column in CSV/Excel files that contains the text content.
67
+
68
+ Returns:
69
+ dict: A dictionary with file paths as keys and their parsed content (or error message) as values.
70
+ """
71
+
72
+
73
+
74
+ if not isinstance(file_paths, list):
75
+ raise ValueError("Expected a list of file paths.")
76
+
77
+ extension_to_parser = {
78
+ '.pdf': parse_pdf,
79
+ '.docx': parse_docx,
80
+ '.txt': parse_txt,
81
+ '.html': parse_html,
82
+ '.htm': parse_html, # Considering both .html and .htm for HTML files
83
+ '.csv': lambda file_path: parse_csv_or_excel(file_path, text_column),
84
+ '.xlsx': lambda file_path: parse_csv_or_excel(file_path, text_column)
85
+ }
86
+
87
+ parsed_contents = {}
88
+ file_names = []
89
+
90
+ for file_path in file_paths:
91
+ print(file_path.name)
92
+ #file = open(file_path.name, 'r')
93
+ #print(file)
94
+ file_extension = determine_file_type(file_path.name)
95
+ if file_extension in extension_to_parser:
96
+ parsed_contents[file_path.name] = extension_to_parser[file_extension](file_path.name)
97
+ else:
98
+ parsed_contents[file_path.name] = f"Unsupported file type: {file_extension}"
99
+
100
+ filename_end = get_file_path_end(file_path.name)
101
+
102
+ file_names.append(filename_end)
103
+
104
+ return parsed_contents, file_names
105
+
106
+ def text_regex_clean(text):
107
+ # Merge hyphenated words
108
+ text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
109
+ # If a double newline ends in a letter, add a full stop.
110
+ text = re.sub(r'(?<=[a-zA-Z])\n\n', '.\n\n', text)
111
+ # Fix newlines in the middle of sentences
112
+ text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
113
+ # Remove multiple newlines
114
+ text = re.sub(r"\n\s*\n", "\n\n", text)
115
+ text = re.sub(r" ", " ", text)
116
+ # Add full stops and new lines between words with no space between where the second one has a capital letter
117
+ text = re.sub(r'(?<=[a-z])(?=[A-Z])', '. \n\n', text)
118
+
119
+ return text
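
A small illustration of what text_regex_clean does to typical PDF-extracted text (run inside the module above, or after importing text_regex_clean from it):

sample = "Housing deliv-\nery remains a priority\n\nThe council will act"
print(text_regex_clean(sample))
# The hyphenated line break is merged ("delivery") and the paragraph break gains a full stop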
120
+
121
+ def parse_csv_or_excel(file_paths, text_column = "text"):
122
+ """
123
+ Read in a CSV or Excel file.
124
+
125
+ Parameters:
126
+ file_path (str): Path to the CSV file.
127
+ text_column (str): Name of the column in the CSV file that contains the text content.
128
+
129
+ Returns:
130
+ Pandas DataFrame: Dataframe output from file read
131
+ """
132
+
133
+ file_names = []
134
+ out_df = pd.DataFrame()
135
+
136
+ for file_path in file_paths:
137
+ file_extension = determine_file_type(file_path.name)
138
+ file_name = get_file_path_end(file_path.name)
139
+
140
+ if file_extension == ".csv":
141
+ df = pd.read_csv(file_path.name)
142
+ if text_column not in df.columns: return pd.DataFrame(), ['Please choose a valid column name']
143
+ df['source'] = file_name
144
+ df['page_section'] = ""
145
+ elif file_extension == ".xlsx":
146
+ df = pd.read_excel(file_path.name, engine='openpyxl')
147
+ if text_column not in df.columns: return pd.DataFrame(), ['Please choose a valid column name']
148
+ df['source'] = file_name
149
+ df['page_section'] = ""
150
+ else:
151
+ print(f"Unsupported file type: {file_extension}")
152
+ return pd.DataFrame(), ['Please choose a valid file type']
153
+
154
+ file_names.append(file_name)
155
+ out_df = pd.concat([out_df, df])
156
+
157
+ #if text_column not in df.columns:
158
+ # return f"Column '{text_column}' not found in {file_path}"
159
+ #text_out = " ".join(df[text_column].dropna().astype(str))
160
+ return out_df, file_names
161
+
162
+ def parse_excel(file_path, text_column):
163
+ """
164
+ Read text from an Excel file.
165
+
166
+ Parameters:
167
+ file_path (str): Path to the Excel file.
168
+ text_column (str): Name of the column in the Excel file that contains the text content.
169
+
170
+ Returns:
171
+ Pandas DataFrame: Dataframe output from file read
172
+ """
173
+ df = pd.read_excel(file_path, engine='openpyxl')
174
+ #if text_column not in df.columns:
175
+ # return f"Column '{text_column}' not found in {file_path}"
176
+ #text_out = " ".join(df[text_column].dropna().astype(str))
177
+ return df
178
+
179
+ def parse_pdf(file) -> List[str]:
180
+
181
+ """
182
+ Extract text from a PDF file.
183
+
184
+ Parameters:
185
+ file_path (str): Path to the PDF file.
186
+
187
+ Returns:
188
+ List[str]: Extracted text from the PDF.
189
+ """
190
+
191
+ output = []
192
+ #for file in files:
193
+ print(file) # .name
194
+ pdf = PdfReader(file) #[i] .name[i]
195
+
196
+ for page in pdf.pages:
197
+ text = page.extract_text()
198
+
199
+ text = text_regex_clean(text)
200
+
201
+ output.append(text)
202
+ return output
203
+
204
+ def parse_docx(file_path):
205
+ """
206
+ Reads the content of a .docx file and returns it as a string.
207
+
208
+ Parameters:
209
+ - file_path (str): Path to the .docx file.
210
+
211
+ Returns:
212
+ - str: Content of the .docx file.
213
+ """
214
+ doc = Doc(file_path)
215
+ full_text = []
216
+ for para in doc.paragraphs:
217
+ para_text = text_regex_clean(para.text)
218
+
219
+ full_text.append(para_text.replace(" ", " ").strip())
220
+ return '\n'.join(full_text)
221
+
222
+ def parse_txt(file_path):
223
+ """
224
+ Read text from a TXT or HTML file.
225
+
226
+ Parameters:
227
+ file_path (str): Path to the TXT or HTML file.
228
+
229
+ Returns:
230
+ str: Text content of the file.
231
+ """
232
+ with open(file_path, 'r', encoding="utf-8") as file:
233
+ file_contents = file.read().replace(" ", " ").strip()
234
+
235
+ file_contents = text_regex_clean(file_contents)
236
+
237
+ return file_contents
238
+
239
+ def parse_html(page_url, div_filter="p"):
240
+ """
241
+ Determine if the source is a web URL or a local HTML file, extract the content based on the div of choice. Also tries to extract dates (WIP)
242
+
243
+ Parameters:
244
+ page_url (str): The web URL or local file path.
245
+
246
+ Returns:
247
+ str: Extracted content.
248
+ """
249
+
250
+ def is_web_url(s):
251
+ """
252
+ Check if the input string is a web URL.
253
+ """
254
+ return s.startswith("http://") or s.startswith("https://")
255
+
256
+ def is_local_html_file(s):
257
+ """
258
+ Check if the input string is a path to a local HTML file.
259
+ """
260
+ return (s.endswith(".html") or s.endswith(".htm")) and os.path.isfile(s)
261
+
262
+ def extract_text_from_source(source):
263
+ """
264
+ Determine if the source is a web URL or a local HTML file,
265
+ and then extract its content accordingly.
266
+
267
+ Parameters:
268
+ source (str): The web URL or local file path.
269
+
270
+ Returns:
271
+ str: Extracted content.
272
+ """
273
+ if is_web_url(source):
274
+ response = requests.get(source)
275
+ response.raise_for_status() # Raise an HTTPError for bad responses
276
+ return response.text.replace(" ", " ").strip()
277
+ elif is_local_html_file(source):
278
+ with open(source, 'r', encoding='utf-8') as file:
279
+ file_out = file.read().replace(" ", " ").strip()
280
+ return file_out
281
+ else:
282
+ raise ValueError("Input is neither a valid web URL nor a local HTML file path.")
283
+
284
+
285
+ def clean_html_data(data, date_filter="", div_filt="p"):
286
+ """
287
+ Extracts and cleans data from HTML content.
288
+
289
+ Parameters:
290
+ data (str): HTML content to be parsed.
291
+ date_filter (str, optional): Date string to filter results. If set, only content with a date greater than this will be returned.
292
+ div_filt (str, optional): HTML tag to search for text content. Defaults to "p".
293
+
294
+ Returns:
295
+ tuple: Contains extracted text and date as strings. Returns empty strings if not found.
296
+ """
297
+
298
+ soup = BeautifulSoup(data, 'html.parser')
299
+
300
+ # Function to exclude div with id "bar"
301
+ def exclude_div_with_id_bar(tag):
302
+ return tag.has_attr('id') and tag['id'] == 'related-links'
303
+
304
+ text_elements = soup.find_all(div_filt)
305
+ date_elements = soup.find_all(div_filt, {"class": "page-neutral-intro__meta"})
306
+
307
+ # Extract date
308
+ date_out = ""
309
+ if date_elements:
310
+ date_out = re.search(">(.*?)<", str(date_elements[0])).group(1)
311
+ date_dt = dateutil.parser.parse(date_out)
312
+
313
+ if date_filter:
314
+ date_filter_dt = dateutil.parser.parse(date_filter)
315
+ if date_dt < date_filter_dt:
316
+ return '', date_out
317
+
318
+ # Extract text
319
+ text_out_final = ""
320
+ if text_elements:
321
+ text_out_final = '\n'.join(paragraph.text for paragraph in text_elements)
322
+ text_out_final = text_regex_clean(text_out_final)
323
+ else:
324
+ print(f"No elements found with tag '{div_filt}'. No text returned.")
325
+
326
+ return text_out_final, date_out
327
+
328
+
329
+ #page_url = "https://pypi.org/project/InstructorEmbedding/" #'https://www.ons.gov.uk/visualisations/censusareachanges/E09000022/index.html'
330
+
331
+ html_text = extract_text_from_source(page_url)
332
+ #print(page.text)
333
+
334
+ texts = []
335
+ metadatas = []
336
+
337
+ clean_text, date = clean_html_data(html_text, date_filter="", div_filt=div_filter)
338
+ texts.append(clean_text)
339
+ metadatas.append({"source": page_url, "date":str(date)})
340
+
341
+ #print(metadatas)
342
+
343
+ return texts, metadatas, page_url
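
A self-contained illustration of the extraction pattern used by clean_html_data above: parse the HTML and keep only the text of the chosen tag (a hypothetical snippet; real pages are fetched with requests first):

from bs4 import BeautifulSoup

html = "<html><body><h1>Title</h1><p>First paragraph.</p><p>Second paragraph.</p></body></html>"
soup = BeautifulSoup(html, "html.parser")
text = "\n".join(p.text for p in soup.find_all("p"))
print(text)  # "First paragraph." and "Second paragraph." on separate lines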
344
+
345
+ def get_file_path_end(file_path):
346
+ match = re.search(r'(.*[\/\\])?(.+)$', file_path)
347
+
348
+ filename_end = match.group(2) if match else ''
349
+
350
+ return filename_end
351
+
352
+ # +
353
+ # Convert parsed text to docs
354
+ # -
355
+
356
+ def text_to_docs(text_dict: dict, chunk_size: int = chunk_size) -> List[Document]:
357
+ """
358
+ Converts the output of parse_file (a dictionary of file paths to content)
359
+ to a list of Documents with metadata.
360
+ """
361
+
362
+ doc_sections = []
363
+ parent_doc_sections = []
364
+
365
+ for file_path, content in text_dict.items():
366
+ ext = os.path.splitext(file_path)[1].lower()
367
+
368
+ # Depending on the file extension, handle the content
369
+ if ext == '.pdf':
370
+ docs, page_docs = pdf_text_to_docs(content, chunk_size)
371
+ elif ext in ['.html', '.htm', '.txt', '.docx']:
372
+ docs = html_text_to_docs(content, chunk_size)
373
+ elif ext in ['.csv', '.xlsx']:
374
+ docs, page_docs = csv_excel_text_to_docs(content, chunk_size)
375
+ else:
376
+ print(f"Unsupported file type {ext} for {file_path}. Skipping.")
377
+ continue
378
+
379
+
380
+ filename_end = get_file_path_end(file_path)
381
+
382
+ #match = re.search(r'(.*[\/\\])?(.+)$', file_path)
383
+ #filename_end = match.group(2) if match else ''
384
+
385
+ # Add filename as metadata
386
+ for doc in docs: doc.metadata["source"] = filename_end
387
+ #for parent_doc in parent_docs: parent_doc.metadata["source"] = filename_end
388
+
389
+ doc_sections.extend(docs)
390
+ #parent_doc_sections.extend(parent_docs)
391
+
392
+ return doc_sections#, page_docs
393
+
394
+ def pdf_text_to_docs(text, chunk_size: int = chunk_size) -> List[Document]:
395
+ """Converts a string or list of strings to a list of Documents
396
+ with metadata."""
397
+
398
+ #print(text)
399
+
400
+ if isinstance(text, str):
401
+ # Take a single string as one page
402
+ text = [text]
403
+
404
+ page_docs = [Document(page_content=page, metadata={"page": page}) for page in text]
405
+
406
+
407
+ # Add page numbers as metadata
408
+ for i, doc in enumerate(page_docs):
409
+ doc.metadata["page"] = i + 1
410
+
411
+ print("page docs are: ")
412
+ print(page_docs)
413
+
414
+ # Split pages into sections
415
+ doc_sections = []
416
+
417
+ for doc in page_docs:
418
+
419
+ #print("page content: ")
420
+ #print(doc.page_content)
421
+
422
+ if doc.page_content == '':
423
+ sections = ['']
424
+
425
+ else:
426
+ text_splitter = RecursiveCharacterTextSplitter(
427
+ chunk_size=chunk_size,
428
+ separators=split_strat,#["\n\n", "\n", ".", "!", "?", ",", " ", ""],
429
+ chunk_overlap=chunk_overlap,
430
+ add_start_index=True
431
+ )
432
+ sections = text_splitter.split_text(doc.page_content)
433
+
434
+ for i, section in enumerate(sections):
435
+ doc = Document(
436
+ page_content=section, metadata={"page": doc.metadata["page"], "section": i, "page_section": f"{doc.metadata['page']}-{i}"})
437
+
438
+
439
+ doc_sections.append(doc)
440
+
441
+ return doc_sections, page_docs#, parent_doc
442
+
443
+ def html_text_to_docs(texts, metadatas, chunk_size:int = chunk_size):
444
+
445
+ text_splitter = RecursiveCharacterTextSplitter(
446
+ separators=split_strat,#["\n\n", "\n", ".", "!", "?", ",", " ", ""],
447
+ chunk_size=chunk_size,
448
+ chunk_overlap=chunk_overlap,
449
+ length_function=len,
450
+ add_start_index=True
451
+ )
452
+
453
+ #print(texts)
454
+ #print(metadatas)
455
+
456
+ documents = text_splitter.create_documents(texts, metadatas=metadatas)
457
+
458
+ for i, section in enumerate(documents):
459
+ section.metadata["page_section"] = i + 1
460
+
461
+
462
+
463
+ return documents
464
+
465
+ def write_out_metadata_as_string(metadata_in):
466
+ # If metadata_in is a single dictionary, wrap it in a list
467
+ if isinstance(metadata_in, dict):
468
+ metadata_in = [metadata_in]
469
+
470
+ metadata_string = [f"{' '.join(f'{k}: {v}' for k, v in d.items() if k != 'page_section')}" for d in metadata_in] # ['metadata']
471
+ return metadata_string
472
+
473
+ def csv_excel_text_to_docs(df, text_column='text', chunk_size=None) -> List[Document]:
474
+ """Converts a DataFrame's content to a list of Documents with metadata."""
475
+
476
+ doc_sections = []
477
+ df[text_column] = df[text_column].astype(str) # Ensure column is a string column
478
+
479
+ # For each row in the dataframe
480
+ for idx, row in df.iterrows():
481
+ # Extract the text content for the document
482
+ doc_content = row[text_column]
483
+
484
+ # Generate metadata containing other columns' data
485
+ metadata = {"row": idx + 1}
486
+ for col, value in row.items():
487
+ if col != text_column:
488
+ metadata[col] = value
489
+
490
+ metadata_string = write_out_metadata_as_string(metadata)[0]
491
+
492
+
493
+
494
+ # If chunk_size is provided, split the text into chunks
495
+ if chunk_size:
496
+ # Assuming you have a text splitter function similar to the PDF handling
497
+ text_splitter = RecursiveCharacterTextSplitter(
498
+ chunk_size=chunk_size,
499
+ # Other arguments as required by the splitter
500
+ )
501
+ sections = text_splitter.split_text(doc_content)
502
+
503
+
504
+ # For each section, create a Document object
505
+ for i, section in enumerate(sections):
506
+ section = '. '.join([metadata_string, section])
507
+ doc = Document(page_content=section,
508
+ metadata={**metadata, "section": i, "row_section": f"{metadata['row']}-{i}"})
509
+ doc_sections.append(doc)
510
+ else:
511
+ # If no chunk_size is provided, create a single Document object for the row
512
+ doc_content = '. '.join([metadata_string, doc_content])
513
+ doc = Document(page_content=doc_content, metadata=metadata)
514
+ doc_sections.append(doc)
515
+
516
+ return doc_sections
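
A usage sketch for the row-to-Document conversion above, with a hypothetical two-row DataFrame (assumes csv_excel_text_to_docs and its helpers from this module are in scope):

import pandas as pd

df = pd.DataFrame({
    "text": ["Lambeth will build more homes.", "Parks will be protected."],
    "topic": ["housing", "environment"],
})
docs = csv_excel_text_to_docs(df, text_column="text")
print(docs[0].page_content)  # metadata string ("row: 1 topic: housing") followed by the row text
print(docs[0].metadata)      # {'row': 1, 'topic': 'housing'}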
517
+
518
+ # # Functions for working with documents after loading them back in
519
+
520
+ def pull_out_data(series):
521
+
522
+ # define a lambda function to convert each string into a tuple
523
+ to_tuple = lambda x: eval(x)
524
+
525
+ # apply the lambda function to each element of the series
526
+ series_tup = series.apply(to_tuple)
527
+
528
+ series_tup_content = list(zip(*series_tup))[1]
529
+
530
+ series = pd.Series(list(series_tup_content))#.str.replace("^Main post content", "", regex=True).str.strip()
531
+
532
+ return series
533
+
534
+ def docs_from_csv(df):
535
+
536
+ import ast
537
+
538
+ documents = []
539
+
540
+ page_content = pull_out_data(df["0"])
541
+ metadatas = pull_out_data(df["1"])
542
+
543
+ for x in range(0,len(df)):
544
+ new_doc = Document(page_content=page_content[x], metadata=metadatas[x])
545
+ documents.append(new_doc)
546
+
547
+ return documents
548
+
549
+ def docs_from_lists(docs, metadatas):
550
+
551
+ documents = []
552
+
553
+ for x, doc in enumerate(docs):
554
+ new_doc = Document(page_content=doc, metadata=metadatas[x])
555
+ documents.append(new_doc)
556
+
557
+ return documents
558
+
559
+ def docs_elements_from_csv_save(docs_path="documents.csv"):
560
+
561
+ documents = pd.read_csv(docs_path)
562
+
563
+ docs_out = docs_from_csv(documents)
564
+
565
+ out_df = pd.DataFrame(docs_out)
566
+
567
+ docs_content = pull_out_data(out_df[0].astype(str))
568
+
569
+ docs_meta = pull_out_data(out_df[1].astype(str))
570
+
571
+ doc_sources = [d['source'] for d in docs_meta]
572
+
573
+ return out_df, docs_content, docs_meta, doc_sources
574
+
575
+ # ## Create embeddings and save faiss vector store to the path specified in `save_to`
576
+
577
+ def load_embeddings(model_name = "BAAI/bge-base-en-v1.5"):
578
+
579
+ #if model_name == "hkunlp/instructor-large":
580
+ # embeddings_func = HuggingFaceInstructEmbeddings(model_name=model_name,
581
+ # embed_instruction="Represent the paragraph for retrieval: ",
582
+ # query_instruction="Represent the question for retrieving supporting documents: "
583
+ # )
584
+
585
+ #else:
586
+ embeddings_func = HuggingFaceEmbeddings(model_name=model_name)
587
+
588
+ global embeddings
589
+
590
+ embeddings = embeddings_func
591
+
592
+ return embeddings_func
593
+
594
+ def embed_faiss_save_to_zip(docs_out, save_to="faiss_lambeth_census_embedding", model_name = "BAAI/bge-base-en-v1.5"):
595
+
596
+ load_embeddings(model_name=model_name)
597
+
598
+ #embeddings_fast = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
599
+
600
+ print(f"> Total split documents: {len(docs_out)}")
601
+
602
+ vectorstore = FAISS.from_documents(documents=docs_out, embedding=embeddings)
603
+
604
+
605
+ if Path(save_to).exists():
606
+ vectorstore.save_local(folder_path=save_to)
607
+
608
+ print("> DONE")
609
+ print(f"> Saved to: {save_to}")
610
+
611
+ ### Save as zip, then remove faiss/pkl files to allow for upload to huggingface
612
+
613
+ import shutil
614
+
615
+ shutil.make_archive(save_to, 'zip', save_to)
616
+
617
+ os.remove(save_to + "/index.faiss")
618
+ os.remove(save_to + "/index.pkl")
619
+
620
+ shutil.move(save_to + '.zip', save_to + "/" + save_to + '.zip')
621
+
622
+ return vectorstore
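
A minimal end-to-end sketch of the embed-and-search flow that embed_faiss_save_to_zip wraps (the embedding model is downloaded on first run; names mirror the defaults in this file, the document contents are made up for the example):

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.docstore.document import Document

docs = [
    Document(page_content="Lambeth plans thousands of new homes.", metadata={"source": "plan"}),
    Document(page_content="The borough aims to reach net zero.", metadata={"source": "plan"}),
]
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5")
vectorstore = FAISS.from_documents(documents=docs, embedding=embeddings)
print(vectorstore.similarity_search_with_score("housing targets", k=1))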
623
+
624
+ def docs_to_chroma_save(embeddings, docs_out:PandasDataFrame, save_to:str):
625
+ print(f"> Total split documents: {len(docs_out)}")
626
+
627
+ vectordb = Chroma.from_documents(documents=docs_out,
628
+ embedding=embeddings,
629
+ persist_directory=save_to)
630
+
631
+ # persist the db to disk
632
+ vectordb.persist()
633
+
634
+ print("> DONE")
635
+ print(f"> Saved to: {save_to}")
636
+
637
+ return vectordb
638
+
639
+ def sim_search_local_saved_vec(query, k_val, save_to="faiss_lambeth_census_embedding"):
640
+
641
+ load_embeddings()
642
+
643
+ docsearch = FAISS.load_local(folder_path=save_to, embeddings=embeddings)
644
+
645
+
646
+ print(query)
647
+
648
+ search = docsearch.similarity_search_with_score(query, k=k_val)
649
+
650
+ for item in search:
651
+ print(item[0].page_content)
652
+ print(f"Page: {item[0].metadata['source']}")
653
+ print(f"Date: {item[0].metadata['date']}")
654
+ print(f"Score: {item[1]}")
655
+ print("---")
chatfuncs/ingest_borough_plan.py ADDED
@@ -0,0 +1,14 @@
1
+ import ingest as ing
2
+
3
+ borough_plan_text, file_names = ing.parse_file([open("Lambeth_2030-Our_Future_Our_Lambeth.pdf")])
4
+ print("Borough plan text created")
5
+
6
+ print(borough_plan_text)
7
+
8
+ borough_plan_docs = ing.text_to_docs(borough_plan_text)
9
+ print("Borough plan docs created")
10
+
11
+ embedding_model = "BAAI/bge-base-en-v1.5"
12
+
13
+ embeddings = ing.load_embeddings(model_name = embedding_model)
14
+ ing.embed_faiss_save_to_zip(borough_plan_docs, save_to="faiss_embedding", model_name = embedding_model)