# Standard_Intelligence_Dev / split_files_to_excel.py
import numpy as np
import io
import os
import zipfile
import logging
import collections
import tempfile
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import gradio as gr
from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from bs4 import BeautifulSoup
import re
from langchain.docstore.document import Document
import unstructured
from unstructured.partition.docx import partition_docx
from unstructured.partition.auto import partition
import tiktoken
#from transformers import AutoTokenizer
from pypdf import PdfReader
import pandas as pd
import requests
import json
MODEL = "thenlper/gte-base"
CHUNK_SIZE = 1500
CHUNK_OVERLAP = 400
embeddings = HuggingFaceEmbeddings(
model_name=MODEL,
cache_folder=os.getenv("SENTENCE_TRANSFORMERS_HOME")
)
# model_id = "mistralai/Mistral-7B-Instruct-v0.1"
# access_token = os.getenv("HUGGINGFACE_SPLITFILES_API_KEY")
# tokenizer = AutoTokenizer.from_pretrained(
# model_id,
# padding_side="left",
# token = access_token
# )
tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = CHUNK_SIZE,
chunk_overlap = CHUNK_OVERLAP,
length_function = len,
)
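# Note: CharacterTextSplitter above measures chunk length in characters (length_function=len),
# while the tiktoken encoder is used further below to measure and split chunks in tokens.
# Minimal illustration of that token counting (comment-only sketch):
#
#   sample = "Intelligent document splitting."
#   n_tokens = len(tokenizer.encode(sample))                       # token count under the gpt-3.5-turbo encoding
#   assert tokenizer.decode(tokenizer.encode(sample)) == sample    # encode/decode round-trips for plain text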
# def update_label(label1):
# return gr.update(choices=list(df.columns))
def function_split_call(fi_input, dropdown, choice, chunk_size):
if choice == "Intelligent split":
nb_pages = chunk_size
return split_in_df(fi_input, nb_pages)
elif choice == "Non intelligent split":
return non_intelligent_split(fi_input, chunk_size)
else:
return split_by_keywords(fi_input,dropdown)
def change_textbox(dropdown,radio):
if len(dropdown) == 0 :
dropdown = ["introduction", "objective", "summary", "conclusion"]
if radio == "Intelligent split":
return gr.Dropdown(dropdown, visible=False), gr.Number(label="First pages to keep (0 for all)", value=2, interactive=True, visible=True)
elif radio == "Intelligent split by keywords":
return gr.Dropdown(dropdown, multiselect=True, visible=True, allow_custom_value=True), gr.Number(visible=False)
elif radio == "Non intelligent split":
return gr.Dropdown(dropdown, visible=False),gr.Number(label="Chunk size", value=1000, interactive=True, visible=True)
else:
return gr.Dropdown(dropdown, visible=False),gr.Number(visible=False)
def group_text_by_font_size(content):
cur_fs = []
cur_text = ''
cur_page = -1
cur_c = content[0]
multi_fs = False
snippets = [] # first collect all snippets that have the same font size
for c in content:
# print(f"c={c}\n\n")
        if c.find('a') is not None and c.find('a').get('name'):
            cur_page = int(c.find('a').get('name'))
sp_list = c.find_all('span')
if not sp_list:
continue
for sp in sp_list:
# print(f"sp={sp}\n\n")
if not sp:
continue
st = sp.get('style')
if not st:
continue
            fs = re.findall(r'font-size:(\d+)px', st)
# print(f"fs={fs}\n\n")
if not fs:
continue
fs = [int(fs[0])]
if len(cur_fs)==0:
cur_fs = fs
if fs == cur_fs:
cur_text += sp.text
elif not sp.find('br') and cur_c==c:
cur_text += sp.text
cur_fs.extend(fs)
multi_fs = True
elif sp.find('br') and multi_fs == True: # if a br tag is found and the text is in a different fs, it is the last part of the multifontsize line
cur_fs.extend(fs)
snippets.append((cur_text+sp.text,max(cur_fs), cur_page))
cur_fs = []
cur_text = ''
cur_c = c
multi_fs = False
else:
snippets.append((cur_text,max(cur_fs), cur_page))
cur_fs = fs
cur_text = sp.text
cur_c = c
multi_fs = False
snippets.append((cur_text,max(cur_fs), cur_page))
return snippets
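# Illustrative sketch of what group_text_by_font_size returns (assumed PDFMiner-as-HTML markup):
# a list of (text, max_font_size, page_number) tuples, one per run of same-sized text.
#
#   html = ('<div><a name="1"></a><span style="font-size:16px">Title<br></span></div>'
#           '<div><span style="font-size:10px">Body text.<br></span></div>')
#   snippets = group_text_by_font_size(BeautifulSoup(html, 'html.parser').find_all('div'))
#   # -> [('Title', 16, 1), ('Body text.', 10, 1)]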
def get_titles_fs(fs_list):
filtered_fs_list = [item[0] for item in fs_list if item[0] > fs_list[0][0]]
return sorted(filtered_fs_list, reverse=True)
def calculate_total_characters(snippets):
font_sizes = {} #dictionary to store font-size and total characters
for text, font_size, _ in snippets:
        # remove newlines; the digit removal below is left disabled
cleaned_text = text.replace('\n', '')
#cleaned_text = re.sub(r'\d+', '', cleaned_text)
total_characters = len(cleaned_text)
#update the dictionary
if font_size in font_sizes:
font_sizes[font_size] += total_characters
else:
font_sizes[font_size] = total_characters
#convert the dictionary into a sorted list of tuples
size_charac_list = sorted(font_sizes.items(), key=lambda x: x[1], reverse=True)
return size_charac_list
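# Illustrative example: the returned list pairs each font size with the total number of characters
# rendered in that size, sorted by character count (the body font normally comes first).
#
#   calculate_total_characters([('Title', 16, 1), ('Body text.', 10, 1)])
#   # -> [(10, 10), (16, 5)]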
def create_documents(source, snippets, font_sizes):
docs = []
titles_fs = get_titles_fs(font_sizes)
for snippet in snippets:
cur_fs = snippet[1]
        if cur_fs>font_sizes[0][0] and len(snippet[0])>2:
            # assumed intent: the original no-op replace normalized non-breaking spaces
            content = min((titles_fs.index(cur_fs)+1), 3)*"#" + " " + snippet[0].replace("\xa0", " ")
            category = "Title"
        else:
            content = snippet[0].replace("\xa0", " ")
            category = "Paragraph"
metadata={"source":source, "filename":source.split("/")[-1], "file_directory": "/".join(source.split("/")[:-1]), "file_category":"", "file_sub-cat":"", "file_sub2-cat":"", "category":category, "filetype":source.split(".")[-1], "page_number":snippet[2]}
categories = source.split("/")
cat_update=""
if len(categories)>4:
cat_update = {"file_category":categories[1], "file_sub-cat":categories[2], "file_sub2-cat":categories[3]}
elif len(categories)>3:
cat_update = {"file_category":categories[1], "file_sub-cat":categories[2]}
elif len(categories)>2:
cat_update = {"file_category":categories[1]}
metadata.update(cat_update)
docs.append(Document(page_content=content, metadata=metadata))
return docs
## Group Chunks docx or pdf
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def group_chunks_by_section(chunks, min_chunk_size=64):
filtered_chunks = [chunk for chunk in chunks if chunk.metadata['category'] != 'PageBreak']# Add more filters if needed
#print(f"filtered = {len(filtered_chunks)} - before = {len(chunks)}")
new_chunks = []
seen_paragraph = False
new_title = True #switches when there is a new paragraph to create a new chunk
for i, chunk in enumerate(filtered_chunks):
# print(f"\n\n\n#{i}:METADATA: {chunk.metadata['category']}")
if new_title:
#print(f"<-- NEW title DETECTED -->")
new_chunk = chunk
new_title = False
add_content = False
new_chunk.metadata['titles'] = ""
#print(f"CONTENT: {new_chunk.page_content}\nMETADATA: {new_chunk.metadata['category']} \n title: {new_chunk.metadata['title']}")
if chunk.metadata['category'].lower() =='title':
new_chunk.metadata['titles'] += f"{chunk.page_content} ~~ "
else:
#Activates when a paragraph is seen after one or more titles
seen_paragraph = True
#Avoid adding the title 2 times to the page content
if add_content:#and chunk.page_content not in new_chunk.page_content
new_chunk.page_content += f"\n{chunk.page_content}"
#edit the end_page number, the last one keeps its place
            try:
                new_chunk.metadata['end_page'] = chunk.metadata['page_number']
            except KeyError:
                pass  # this chunk has no page number in its metadata
add_content = True
#If filtered_chunks[i+1] raises an error, this is probably because this is the last chunk
try:
#If the next chunk is a title and we have already seen a paragraph and the current chunk content is long enough, we create a new document
if filtered_chunks[i+1].metadata['category'].lower() =="title" and seen_paragraph and len(new_chunk.page_content)>min_chunk_size:
if 'category' in new_chunk.metadata:
new_chunk.metadata.pop('category')
new_chunks.append(new_chunk)
new_title = True
seen_paragraph = False
        except IndexError:
            # reached the last chunk
            new_chunks.append(new_chunk)
            #print('🆘 Gone through all chunks 🆘')
            break
return new_chunks
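# Minimal sketch of group_chunks_by_section: a "Title" chunk is merged with the paragraph
# chunks that follow it into a single Document, and every title seen on the way is appended
# to metadata['titles'] (separated by " ~~ ").
#
#   docs = [Document(page_content="# Intro", metadata={"category": "Title"}),
#           Document(page_content="A paragraph that is long enough to keep.", metadata={"category": "Paragraph"})]
#   grouped = group_chunks_by_section(docs, min_chunk_size=10)
#   # -> one Document starting with "# Intro", with metadata['titles'] == "# Intro ~~ "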
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
## Split documents by font
def split_pdf(file_path):
loader = PDFMinerPDFasHTMLLoader(file_path)
data = loader.load()[0] # entire pdf is loaded as a single Document
soup = BeautifulSoup(data.page_content,'html.parser')
content = soup.find_all('div')#List of all elements in div tags
try:
snippets = group_text_by_font_size(content)
except Exception as e:
print("ERROR WHILE GROUPING BY FONT SIZE", e)
snippets = [("ERROR WHILE GROUPING BY FONT SIZE", 0, -1)]
font_sizes = calculate_total_characters(snippets)#get the amount of characters for each font_size
chunks = create_documents(file_path, snippets, font_sizes)
return chunks
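# Usage sketch (the path below is hypothetical):
#
#   chunks = split_pdf("docs/example.pdf")
#   for c in chunks[:3]:
#       print(c.metadata["category"], "|", c.page_content[:60])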
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def split_docx(file_path):
chunks_elms = partition_docx(filename=file_path)
chunks = []
file_categories = file_path.split("/")
for chunk_elm in chunks_elms:
category = chunk_elm.category
if category == "Title":
chunk = Document(page_content= min(chunk_elm.metadata.to_dict()['category_depth']+1, 3)*"#" + ' ' + chunk_elm.text, metadata=chunk_elm.metadata.to_dict())
else:
chunk = Document(page_content=chunk_elm.text, metadata=chunk_elm.metadata.to_dict())
metadata={"source":file_path, "filename":file_path.split("/")[-1], "file_category":"", "file_sub-cat":"", "file_sub2-cat":"", "category":category, "filetype":file_path.split(".")[-1]}
cat_update=""
if len(file_categories)>4:
cat_update = {"file_category":file_categories[1], "file_sub-cat":file_categories[2], "file_sub2-cat":file_categories[3]}
elif len(file_categories)>3:
cat_update = {"file_category":file_categories[1], "file_sub-cat":file_categories[2]}
elif len(file_categories)>2:
cat_update = {"file_category":file_categories[1]}
metadata.update(cat_update)
chunk.metadata.update(metadata)
chunks.append(chunk)
return chunks
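# Usage sketch (hypothetical path): partition_docx yields one element per docx block; titles
# become markdown-style '#' headings (capped at three levels) so group_chunks_by_section can
# later merge each section into a single Document.
#
#   chunks = split_docx("reports/annual_report.docx")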
def split_txt(file_path, chunk_size=700):
with open(file_path, 'r') as file:
content = file.read()
words = content.split()
chunks = [words[i:i + chunk_size] for i in range(0, len(words), chunk_size)]
file_basename = os.path.basename(file_path)
file_directory = os.path.dirname(file_path)
source = file_path
documents = []
for i, chunk in enumerate(chunks):
tcontent = ' '.join(chunk)
metadata = {
'source': source,
"filename": file_basename,
'file_directory': file_directory,
"file_category": "",
"file_sub-cat": "",
"file_sub2-cat": "",
"category": "",
"filetype": source.split(".")[-1],
"page_number": i
}
document = Document(page_content=tcontent, metadata=metadata)
documents.append(document)
return documents
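# Usage sketch (hypothetical path): the text is cut into windows of `chunk_size` words
# (not characters), one Document per window, with the window index stored as page_number.
#
#   docs = split_txt("notes/readme.txt", chunk_size=700)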
# Load the index of documents (if it has already been built)
def rebuild_index(input_folder, output_folder):
    paths_time = []  # expected to hold (source, last_modified) tuples for the files currently in input_folder
    to_keep = set()
    print(f'number of files {len(paths_time)}')
if len(output_folder.list_paths_in_partition()) > 0:
with tempfile.TemporaryDirectory() as temp_dir:
for f in output_folder.list_paths_in_partition():
with output_folder.get_download_stream(f) as stream:
with open(os.path.join(temp_dir, os.path.basename(f)), "wb") as f2:
f2.write(stream.read())
index = FAISS.load_local(temp_dir, embeddings)
to_remove = []
logging.info(f"{len(index.docstore._dict)} vectors loaded")
for idx, doc in index.docstore._dict.items():
source = (doc.metadata["source"], doc.metadata["last_modified"])
if source in paths_time:
# Identify documents already indexed and still present in the source folder
to_keep.add(source)
else:
# Identify documents removed from the source folder
to_remove.append(idx)
docstore_id_to_index = {v: k for k, v in index.index_to_docstore_id.items()}
# Remove documents that have been deleted from the source folder
vectors_to_remove = []
for idx in to_remove:
del index.docstore._dict[idx]
ind = docstore_id_to_index[idx]
del index.index_to_docstore_id[ind]
vectors_to_remove.append(ind)
index.index.remove_ids(np.array(vectors_to_remove, dtype=np.int64))
index.index_to_docstore_id = {
i: ind
for i, ind in enumerate(index.index_to_docstore_id.values())
}
logging.info(f"{len(to_remove)} vectors removed")
else:
index = None
to_add = [path[0] for path in paths_time if path not in to_keep]
print(f'to_keep: {to_keep}')
print(f'to_add: {to_add}')
return index, to_add
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def split_chunks_by_tokens(documents, max_length=170, overlap=10):
# Create an empty list to store the resized documents
resized = []
# Iterate through the original documents list
for doc in documents:
encoded = tokenizer.encode(doc.page_content)
if len(encoded) > max_length:
            remaining_encoded = encoded  # reuse the encoding computed above
while len(remaining_encoded) > 0:
split_doc = Document(page_content=tokenizer.decode(remaining_encoded[:max(10, max_length)]), metadata=doc.metadata.copy())
resized.append(split_doc)
remaining_encoded = remaining_encoded[max(10, max_length - overlap):]
else:
resized.append(doc)
print(f"Number of chunks before resplitting: {len(documents)} \nAfter splitting: {len(resized)}")
return resized
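# Worked example of the sliding token window above: with max_length=170 and overlap=10 the
# stride is max(10, 170 - 10) = 160 tokens, so a 500-token document yields windows over
# tokens [0:170], [160:330], [320:490] and [480:500], i.e. four chunks, each overlapping
# the previous one by 10 tokens.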
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def split_chunks_by_tokens_period(documents, max_length=170, overlap=10, min_chunk_size=20):
# Create an empty list to store the resized documents
resized = []
previous_file=""
to_encode = ""
skip_next = False
# Iterate through the original documents list
for i, doc in enumerate(documents):
if skip_next:
skip_next = False
continue
current_file = doc.metadata['source']
if current_file != previous_file: #chunk counting
previous_file = current_file
chunk_counter = 0
is_first_chunk = True # Keep track of the first chunk in the document
to_encode += doc.page_content
# if last chunk < min_chunk_size we add it to the previous chunk for the splitting.
try:
if (documents[i+1] is documents[-1] or documents[i+1].metadata['source'] != documents[i+2].metadata['source']) and len(tokenizer.encode(documents[i+1].page_content)) < min_chunk_size: # if the next doc is the last doc of the current file or the last of the corpus
# print('SAME DOC')
skip_next = True
to_encode += documents[i+1].page_content
except Exception as e:
print(e)
#print(f"to_encode:\n{to_encode}")
encoded = tokenizer.encode(to_encode)#encode the current document
if len(encoded) < min_chunk_size and not skip_next:
# print(f"len(encoded):{len(encoded)}<min_chunk_size:{min_chunk_size}")
continue
elif skip_next:
split_doc = Document(page_content=tokenizer.decode(encoded).replace('<s> ', ''), metadata=doc.metadata.copy())
split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
resized.append(split_doc)
# print(f"Added a document of {split_doc.metadata['token_length']} tokens 1")
to_encode = ""
continue
else:
# print(f"len(encoded):{len(encoded)}>=min_chunk_size:{min_chunk_size}")
to_encode = ""
if len(encoded) > max_length:
# print(f"len(encoded):{len(encoded)}>=max_length:{max_length}")
remaining_encoded = encoded
is_last_chunk = False
while len(remaining_encoded) > 1 and not is_last_chunk:
# Check for a period in the first 'overlap' tokens
overlap_text = tokenizer.decode(remaining_encoded[:overlap])# Index by token
period_index_b = overlap_text.find('.')# Index by character
if len(remaining_encoded)>max_length + min_chunk_size:
# print("len(remaining_encoded)>max_length + min_chunk_size")
current_encoded = remaining_encoded[:max(10, max_length)]
else:
# print("not len(remaining_encoded)>max_length + min_chunk_size")
                        current_encoded = remaining_encoded  # if the last chunk is too small, concatenate it with the previous one
is_last_chunk = True
split_doc = Document(page_content=tokenizer.decode(current_encoded).replace('<s> ', ''), metadata=doc.metadata.copy())
split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
resized.append(split_doc)
# print(f"Added a document of {split_doc.metadata['token_length']} tokens 2")
break
                    period_index_e = -1  # default slice end when no period is found near the end (note: -1 trims the final character)
if len(remaining_encoded)>max_length+min_chunk_size:# If it is not the last sub chunk
# print("len(remaining_encoded)>max_length+min_chunk_size")
overlap_text_last = tokenizer.decode(current_encoded[-overlap:])
period_index_last = overlap_text_last.find('.')
if period_index_last != -1 and period_index_last < len(overlap_text_last) - 1:
# print(f"period index last found at {period_index_last}")
period_index_e = period_index_last - len(overlap_text_last)
# print(f"period_index_e :{period_index_e}")
# print(f"last :{overlap_text_last}")
if not is_first_chunk:#starting after the period in overlap
# print("not is_first_chunk", period_index_b)
if period_index_b == -1:# Period not found in overlap
# print(". not found in overlap")
split_doc = Document(page_content=tokenizer.decode(current_encoded)[:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy()) # Keep regular splitting
else:
if is_last_chunk : #not the first but the last
# print("is_last_chunk")
split_doc = Document(page_content=tokenizer.decode(current_encoded)[period_index_b+1:].replace('<s> ', ''), metadata=doc.metadata.copy())
#print("Should start after \".\"")
else:
# print("not is_last_chunk", period_index_e, len(to_encode))
split_doc = Document(page_content=tokenizer.decode(current_encoded)[period_index_b+1:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy()) # Split at the begining and the end
else:#first chunk
# print("else")
split_doc = Document(page_content=tokenizer.decode(current_encoded)[:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy()) # split only at the end if its first chunk
if 'titles' in split_doc.metadata:
# print("title in metadata")
chunk_counter += 1
split_doc.metadata['chunk_id'] = chunk_counter
#A1 We could round chunk length in token if we ignore the '.' position in the overlap and save time of computation
split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
resized.append(split_doc)
print(f"Added a document of {split_doc.metadata['token_length']} tokens 3")
remaining_encoded = remaining_encoded[max(10, max_length - overlap):]
is_first_chunk = False
# # print(len(tokenizer.encode(split_doc.page_content)), split_doc.page_content[:50], "\n-----------------")
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print(split_doc.page_content[:100])
# # print("😂😂😂😂")
# print(split_doc.page_content[-100:])
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            else:  # min_chunk_size <= len(encoded) <= max_length: keep the chunk as is
                print(f"Found a chunk with a suitable size: {len(encoded)} tokens")
                if 'titles' in doc.metadata:  # set by group_chunks_by_section, i.e. the chunk came from split_pdf or split_docx
chunk_counter += 1
doc.metadata['chunk_id'] = chunk_counter
doc.metadata['token_length'] = len(encoded)
doc.page_content = tokenizer.decode(encoded).replace('<s> ', '')
resized.append(doc)
print(f"Added a document of {doc.metadata['token_length']} tokens 4")
print(f"Number of chunks before resplitting: {len(documents)} \nAfter splitting: {len(resized)}")
return resized
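# Usage sketch: same token windows as split_chunks_by_tokens, but boundaries are nudged to the
# nearest period found inside the `overlap` window, trailing chunks shorter than min_chunk_size
# tokens are merged into the previous one, and each resulting Document gets 'token_length'
# (plus 'chunk_id' when 'titles' is present) in its metadata.
#
#   resized = split_chunks_by_tokens_period(docs, max_length=700, overlap=100, min_chunk_size=20)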
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def split_doc_in_chunks(input_folder, base_folders, nb_pages):
docs = []
for i, filename in enumerate(input_folder):
path = filename#os.path.join(input_folder, filename)
print(f"Treating file {i+1}/{len(input_folder)}")
# Select the appropriate document loader
chunks=[]
if path.endswith(".pdf"):
# try:
print("Treatment of pdf file", path)
raw_chunks = split_pdf(path)
for raw_chunk in raw_chunks:
print(f"BASE zzzzz LIST : {base_folders} = i = {i}")
raw_chunk.metadata["Base Folder"] = base_folders[i]
sb_chunks = group_chunks_by_section(raw_chunks)
if nb_pages > 0:
for sb_chunk in sb_chunks:
print(f"CHUNK PAGENUM = {sb_chunk.metadata['page_number']}")
if int(sb_chunk.metadata["page_number"])<=nb_pages:
chunks.append(sb_chunk)
else:
break
else:
chunks = sb_chunks
print(f"Document splitted in {len(chunks)} chunks")
# for chunk in chunks:
# print(f"\n\n____\n\n\nPDF CONTENT: \n{chunk.page_content}\ntitle: {chunk.metadata['title']}\nFile Name: {chunk.metadata['filename']}\n\n")
# except Exception as e:
# print("Error while splitting the pdf file: ", e)
elif path.endswith(".docx"):
try:
print ("Treatment of docx file", path)
raw_chunks = split_docx(path)
for raw_chunk in raw_chunks:
raw_chunk.metadata["Base Folder"] = base_folders[i]
#print(f"RAW :\n***\n{raw_chunks}")
chunks = group_chunks_by_section(raw_chunks)
print(f"Document splitted in {len(chunks)} chunks")
#if "cards-Jan 2022-SP.docx" in path:
#for chunk in chunks:
#print(f"\n\n____\n\n\nDOCX CONTENT: \n{chunk.page_content}\ntitle: {chunk.metadata['title']}\nFile Name: {chunk.metadata['filename']}\n\n")
except Exception as e:
print("Error while splitting the docx file: ", e)
elif path.endswith(".doc"):
try:
loader = UnstructuredFileLoader(path)
# Load the documents and split them in chunks
chunks = loader.load_and_split(text_splitter=text_splitter)
counter, counter2 = collections.Counter(), collections.Counter()
filename = os.path.basename(path)
# Define a unique id for each chunk
for chunk in chunks:
                    chunk.metadata["filename"] = filename
                    chunk.metadata["file_directory"] = os.path.dirname(path)
                    chunk.metadata["filetype"] = filename.split(".")[-1]
chunk.metadata["Base Folder"] = base_folders[i]
if "page" in chunk.metadata:
counter[chunk.metadata['page']] += 1
                        for j in range(len(chunks)):  # use j to avoid shadowing the file index i
                            counter2[chunks[j].metadata['page']] += 1
                            chunks[j].metadata['source'] = filename
else:
if len(chunks) == 1:
chunks[0].metadata['source'] = filename
#The file type is not supported (e.g. .xlsx)
except Exception as e:
print(f"An error occurred: {e}")
elif path.endswith(".txt"):
            try:
                print("Processing TXT file", path)
                chunks = split_txt(path)
                for chunk in chunks:
                    chunk.metadata["Base Folder"] = base_folders[i]
                print(f"Document split into {len(chunks)} chunks")
            except Exception as e:
                print("Error while splitting the txt file: ", e)
try:
if len(chunks)>0:
docs += chunks
except NameError as e:
print(f"An error has occured: {e}")
return docs
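# Usage sketch (hypothetical paths): dispatches each file to the matching splitter
# (.pdf, .docx, .doc, .txt) and concatenates the resulting Document lists; nb_pages > 0
# keeps only the first nb_pages pages of each PDF.
#
#   docs = split_doc_in_chunks(["report.pdf", "spec.docx"], ["", ""], nb_pages=0)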
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def resplit_by_end_of_sentence(docs, max_len, overlap, min_len):
print("❌❌\nResplitting docs by end of sentence\n❌❌")
resized_docs = split_chunks_by_tokens_period(docs, max_len, overlap, min_len)
try:
# add chunk title to all resplitted chunks #todo move this to split_chunks_by_tokens_period(inject_title = True) with a boolean parameter
cur_source = ""
cpt_chunk = 1
for resized_doc in resized_docs:
try:
                title = resized_doc.metadata['titles'].split(' ~~ ')[-2]  # last section title of the chunk; prepended below if missing from the content
if title not in resized_doc.page_content:
resized_doc.page_content = title + "\n" + resized_doc.page_content
if cur_source == resized_doc.metadata["source"]:
resized_doc.metadata['chunk_number'] = cpt_chunk
else:
cpt_chunk = 1
cur_source = resized_doc.metadata["source"]
resized_doc.metadata['chunk_number'] = cpt_chunk
            except Exception as e:  # either no title was found or 'titles' is absent from the metadata
                print("An error occurred: ", e)
#print(f"METADATA:\n{resized_doc.metadata}")
cpt_chunk += 1
except Exception as e:
print('AN ERROR OCCURRED: ', e)
return resized_docs
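# Usage sketch: re-splits the documents on sentence-aligned token windows, prepends the last
# known section title to each chunk when it is missing from the content, and numbers the
# chunks per source file in metadata['chunk_number'].
#
#   re_docs = resplit_by_end_of_sentence(docs, max_len=700, overlap=100, min_len=20)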
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
def build_index(docs, index, output_folder):
if len(docs) > 0:
if index is not None:
# Compute the embedding of each chunk and index these chunks
new_index = FAISS.from_documents(docs, embeddings)
index.merge_from(new_index)
else:
index = FAISS.from_documents(docs, embeddings)
with tempfile.TemporaryDirectory() as temp_dir:
index.save_local(temp_dir)
for f in os.listdir(temp_dir):
output_folder.upload_file(f, os.path.join(temp_dir, f))
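# Usage sketch (assumes `input_folder` / `output_folder` are managed-folder-like objects that
# expose list_paths_in_partition / get_download_stream / upload_file, as used above):
#
#   index, to_add = rebuild_index(input_folder, output_folder)
#   build_index(docs, index, output_folder)   # merges the new vectors into the stored FAISS index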
def extract_zip(zip_path):
extracted_files = []
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
for file_info in zip_ref.infolist():
extracted_files.append(file_info.filename)
zip_ref.extract(file_info.filename)
return extracted_files
def split_in_df(files, nb_pages):
processed_files = []
base_folders = []
print("Processing zip files...")
for file_path in files:
if file_path.endswith('.zip'):
extracted_files = extract_zip(file_path)
processed_files.extend(extracted_files)
base_folders.extend([os.path.splitext(os.path.basename(file_path))[0]] * len(extracted_files))
else:
processed_files.append(file_path)
base_folders.append("")
print(f"BASE FOLDERS LIST : {base_folders}, FILES LIST : {processed_files}")
print("Finished processing zip files\nSplitting files into chunks...")
documents = split_doc_in_chunks(processed_files, base_folders, nb_pages)
re_docs = resplit_by_end_of_sentence(documents, 700, 100, 1000)
print("Finished splitting")
df = pd.DataFrame()
for re_doc in re_docs:
filename = re_doc.metadata['filename']
content = re_doc.page_content
# metadata = document.metadata
# metadata_keys = list(metadata.keys())
# metadata_values = list(metadata.values())
doc_data = {'Filename': filename, 'Content': content}
doc_data["Token_Length"] = re_doc.metadata['token_length']
doc_data["Titles"] = re_doc.metadata['titles'] if 'titles' in re_doc.metadata else ""
doc_data["Base Folder"] = re_doc.metadata["Base Folder"]
# for key, value in zip(metadata_keys, metadata_values):
# doc_data[key] = value
df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)
df.to_excel("dataframe.xlsx", index=False)
return "dataframe.xlsx"
# -------------------------------------------------------------------------------- SPLIT FILES BY KEYWORDS
def split_by_keywords(files, key_words, words_limit=1000):
processed_files = []
extracted_content = []
tabLine = []
    # For each file: keep PDFs as-is, extract ZIPs, and convert .doc/.docx files to PDF
try:
        for f in files:
            not_duplicate = True  # reset for each file so one duplicate does not skip the rest
            for p in processed_files:
if (f[:f.rfind('.')] == p[:p.rfind('.')]):
not_duplicate = False
if not_duplicate:
if f.endswith('.zip'):
extracted_files = extract_zip(f)
print(f"Those are my extracted files{extracted_files}")
for doc in extracted_files:
if doc.endswith('.doc') or doc.endswith('.docx'):
processed_files.append(transform_to_pdf(doc))
if doc.endswith('.pdf'):
processed_files.append(doc)
if f.endswith('.pdf'):
processed_files.append(f)
if f.endswith('.doc') or f.endswith('.docx'):
processed_files.append(transform_to_pdf(f))
    except Exception as ex:
        print(f"Error occurred while processing files: {ex}")
    # Extract content from each processed file
for file in processed_files:
try:
file_name = file
file = PdfReader(file)
pdfNumberPages = len(file.pages)
for pdfPage in range(0, pdfNumberPages):
load_page = file.get_page(pdfPage)
text = load_page.extract_text()
lines = text.split("\n")
sizeOfLines = len(lines) - 1
for index, line in enumerate(lines):
print(line)
for key in key_words:
if key in line:
print("Found keyword")
lineBool = True
lineIndex = index
previousSelectedLines = []
stringLength = 0
linesForSelection = lines
loadOnce = True
selectedPdfPage = pdfPage
while lineBool:
print(lineIndex)
if stringLength > words_limit or lineIndex < 0:
lineBool = False
else:
if lineIndex == 0:
print(f"Line index == 0")
if pdfPage == 0:
lineBool = False
else:
try:
selectedPdfPage -= 1
newLoad_page = file.get_page(selectedPdfPage)
newText = newLoad_page.extract_text()
newLines = newText.split("\n")
linesForSelection = newLines
print(f"len newLines{len(newLines)}")
lineIndex = len(newLines) - 1
except Exception as e:
print(f"Loading previous PDF page failed")
lineBool = False
previousSelectedLines.append(linesForSelection[lineIndex])
stringLength += len(linesForSelection[lineIndex])
lineIndex -= 1
previousSelectedLines = ' '.join(previousSelectedLines[::-1])
lineBool = True
lineIndex = index + 1
nextSelectedLines = ""
linesForSelection = lines
loadOnce = True
selectedPdfPage = pdfPage
while lineBool:
if len(nextSelectedLines.split()) > words_limit:
lineBool = False
else:
if lineIndex > sizeOfLines:
lineBool = False
if pdfPage == pdfNumberPages - 1:
lineBool = False
else:
try:
selectedPdfPage += 1
newLoad_page = file.get_page(selectedPdfPage)
newText = newLoad_page.extract_text()
newLines = newText.split("\n")
linesForSelection = newLines
lineIndex = 0
except Exception as e:
print(f"Loading next PDF page failed")
lineBool = False
else:
nextSelectedLines += " " + linesForSelection[lineIndex]
lineIndex += 1
print(f"Previous Lines : {previousSelectedLines}")
print(f"Next Lines : {nextSelectedLines}")
selectedText = previousSelectedLines + ' ' + nextSelectedLines
print(selectedText)
tabLine.append([file_name, selectedText, key])
print(f"Selected line in keywords is: {line}")
except Exception as ex:
print(f"Error occured while extracting content : {ex}")
for r in tabLine:
text_joined = ''.join(r[1])
text_joined = r[2] + " : \n " + text_joined
extracted_content.append([r[0], text_joined])
df = pd.DataFrame()
for content in extracted_content:
filename = content[0]
text = content[1]
# metadata = document.metadata
# metadata_keys = list(metadata.keys())
# metadata_values = list(metadata.values())
doc_data = {'Filename': filename[filename.rfind("/")+1:], 'Content': text}
# for key, value in zip(metadata_keys, metadata_values):
# doc_data[key] = value
df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)
df.to_excel("dataframe_keywords.xlsx", index=False)
return "dataframe_keywords.xlsx"
# -------------------------------------------------------------------------------- NON INTELLIGENT SPLIT
def transform_to_pdf(doc):
instructions = {'parts': [{'file': 'document'}]}
response = requests.request(
'POST',
'https://api.pspdfkit.com/build',
        headers = {'Authorization': f"Bearer {os.getenv('PSPDFKIT_API_KEY')}"},  # assumed env var name; the API key should not be hard-coded
files = {'document': open(doc, 'rb')},
data = {'instructions': json.dumps(instructions)},
stream = True
)
pdf_name = doc[:doc.find(".doc")] + ".pdf"
if response.ok:
with open(pdf_name, 'wb') as fd:
for chunk in response.iter_content(chunk_size=8096):
fd.write(chunk)
return pdf_name
    else:
        print(response.text)
        return None
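# Usage sketch (hypothetical path): converts a .doc/.docx file to PDF through the PSPDFKit
# Build API and returns the path of the generated PDF next to the source file.
#
#   pdf_path = transform_to_pdf("contract.docx")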
def non_intelligent_split(files, chunk_size = 1000):
extracted_content = []
processed_files = []
    # For each file: keep PDFs as-is, extract ZIPs, and convert .doc/.docx files to PDF
try:
        for f in files:
            not_duplicate = True  # reset for each file so one duplicate does not skip the rest
            for p in processed_files:
if (f[:f.rfind('.')] == p[:p.rfind('.')]):
not_duplicate = False
if not_duplicate:
if f.endswith('.zip'):
extracted_files = extract_zip(f)
print(f"Those are my extracted files{extracted_files}")
for doc in extracted_files:
if doc.endswith('.doc') or doc.endswith('.docx'):
processed_files.append(transform_to_pdf(doc))
if doc.endswith('.pdf'):
processed_files.append(doc)
if f.endswith('.pdf'):
processed_files.append(f)
if f.endswith('.doc') or f.endswith('.docx'):
processed_files.append(transform_to_pdf(f))
    except Exception as ex:
        print(f"Error occurred while processing files: {ex}")
    # Extract content from each processed file
try:
for f in processed_files:
print(f"my filename is : {f}")
file = PdfReader(f)
pdfNumberPages = len(file.pages)
selectedText = ""
for pdfPage in range(0, pdfNumberPages):
load_page = file.get_page(pdfPage)
text = load_page.extract_text()
lines = text.split("\n")
sizeOfLines = 0
for index, line in enumerate(lines):
sizeOfLines += len(line)
selectedText += " " + line
if sizeOfLines >= chunk_size:
textContent = (f"Page {str(pdfPage)} : {selectedText}")
extracted_content.append([f, textContent])
sizeOfLines = 0
selectedText = ""
textContent = (f"Page {str(pdfNumberPages)} : {selectedText}")
extracted_content.append([f, textContent])
except Exception as ex:
print(f"Error occured while extracting content from processed files : {ex}")
df = pd.DataFrame()
for content in extracted_content:
filename = content[0]
text = content[1]
doc_data = {'Filename': filename[filename.rfind("/")+1:], 'Content': text}
df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)
df.to_excel("dataframe_keywords.xlsx", index=False)
return "dataframe_keywords.xlsx"