from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.document_loaders import TextLoader,Docx2txtLoader,PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
import hashlib
class Documentsplit():
    """Load a document (.pdf / .docx / .txt), split it into overlapping
    chunks, and index the chunks in an in-memory Chroma vector store."""

    def __init__(self, file_path):
        """
        Args:
            file_path: Path to the document to load and index.
        """
        self.file_path = file_path
        # Local sentence-transformers model, CPU inference, L2-normalized
        # vectors (required for cosine-similarity search to behave well).
        self.embedding = HuggingFaceEmbeddings(
            model_name='./all-MiniLM-L6-v2',
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': True, 'batch_size': 16},
        )

    def load_file(self):
        """Load the file, split it into chunks, and build a Chroma store.

        Returns:
            Chroma: vector store containing the embedded document chunks.

        Raises:
            ValueError: if the file extension is not .pdf, .docx or .txt.
        """
        path = str(self.file_path)
        # Stable, filesystem-safe identifier derived from the path; used as
        # the Chroma collection name so each file gets its own collection.
        # (The original computed this md5 but never used it.)
        md5 = hashlib.md5(path.encode('utf-8')).hexdigest()

        suffix = path.lower()  # case-insensitive match so '.PDF' etc. also work
        if suffix.endswith('.pdf'):
            loader = PyPDFLoader(self.file_path)
        elif suffix.endswith('.docx'):
            loader = Docx2txtLoader(self.file_path)
        elif suffix.endswith('.txt'):
            loader = TextLoader(self.file_path, encoding='utf-8')
        else:
            # Original code set loader='' here and then crashed with an
            # opaque AttributeError on loader.load(); fail fast instead.
            raise ValueError(f'Unsupported file type: {path!r} '
                             '(expected .pdf, .docx or .txt)')

        docs = loader.load()

        # 1000-char chunks with 200-char overlap so context spanning a chunk
        # boundary is not lost.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
        )
        chunks = splitter.split_documents(docs)

        db = Chroma.from_documents(
            chunks,
            self.embedding,
            collection_name=md5,
        )
        return db