from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
import pickle
import path

# Load the raw UTF-8 corpus (path comes from the project-level `path` module)
# as LangChain Document objects; the intermediate loader is not reused.
document = TextLoader(file_path=path.TXT_PATH, encoding="utf-8").load()

# Maximum chunk length in characters (``length_function=len``).
chunk_size = 80
text_splitter = RecursiveCharacterTextSplitter(
    # FIX: ``separators`` expects a list of strings, not a bare string.
    # ``separators="\n"`` only worked by accident because iterating a
    # one-character string yields that same character; any multi-character
    # separator (e.g. "\n\n") would silently be split char by char.
    separators=["\n"],
    chunk_size=chunk_size,
    chunk_overlap=0,  # no overlap between adjacent chunks
    length_function=len,  # measure size in characters
)
split_docs = text_splitter.split_documents(documents=document)


# Persist the plain-text content of each chunk so downstream steps can
# reload the paragraph list without re-running the splitter.
textdb = []
for doc in split_docs:
    textdb.append(doc.page_content)
with open(f"../dataset/paragraph_list1_{chunk_size}.pkl", "wb") as f:
    pickle.dump(textdb, f)

# Embed every chunk with the configured HuggingFace model and write the
# resulting vector store to disk under a chunk-size-tagged directory.
embeddings = HuggingFaceEmbeddings(model_name=path.MODEL_NAME)
db = Chroma.from_documents(
    split_docs,
    embeddings,
    persist_directory=f"../vdb/chroma_db_v5.{chunk_size}",
)
