from langchain.document_loaders import PyPDFLoader
from langchain_community.embeddings import FastEmbedEmbeddings
from langchain_core.stores import InMemoryByteStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma

import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever

# Split points to try, strongest first; includes CJK punctuation so
# Chinese/Japanese text still breaks at sentence boundaries. The final
# "" entry lets the splitter fall back to splitting anywhere.
_PARENT_SEPARATORS = [
    "\n\n",
    "\n",
    " ",
    ".",
    ",",
    "\u200b",  # Zero-width space
    "\uff0c",  # Fullwidth comma
    "\u3001",  # Ideographic comma
    "\uff0e",  # Fullwidth full stop
    "\u3002",  # Ideographic full stop
    "",
]

# Parent-chunk splitter: large (~5000-character) chunks that are stored
# whole in the docstore and returned to the caller on retrieval.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=5000,
    length_function=len,
    is_separator_regex=False,
    separators=_PARENT_SEPARATORS,
)


# Vector store for the embedded child chunks, persisted on disk.
# NOTE(review): persist_directory is relative to the CWD — confirm the
# script is always launched from the same directory.
vectorstore = Chroma(
    collection_name="full_documents",
    embedding_function=FastEmbedEmbeddings(),
    persist_directory="../data/",
)

# In-memory storage layer for the full parent documents.
store = InMemoryByteStore()

# Metadata key linking each child chunk to its parent document's id.
id_key = "doc_id"

# Multi-vector retriever: searches child chunks in the vector store,
# then returns the matching parent documents from the byte store.
# Starts empty; documents are added below.
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key=id_key,
)

print(retriever)

# Source PDFs to ingest: sleep-research literature under ../data/.
_PDF_DIR = "../data/sleep_literature"
_PDF_FILES = [
    "12888_2023_Article_4936.pdf",
    "12889_2024_Article_17759.pdf",
    "40001_2022_Article_848.pdf",
    "ijerph-19-09890.pdf",
    "JSR-32-e13766.pdf",
    "kim2021.pdf",
    "medicina-59-00041.pdf",
    "pone.0282085.pdf",
    "sutanto2021.pdf",
]
paths = [f"{_PDF_DIR}/{name}" for name in _PDF_FILES]

# Child-chunk splitter: small (100-char, 20-char overlap) chunks that get
# embedded for retrieval. Hoisted out of the loop — the original rebuilt
# an identical splitter on every iteration.
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)

for path in paths:
    # Load one PDF and split it into large "parent" chunks.
    loader = PyPDFLoader(path)
    docs = loader.load()
    docs = text_splitter.split_documents(docs)

    # One stable UUID per parent chunk, used to link children back to it.
    doc_ids = [str(uuid.uuid4()) for _ in docs]

    # Split each parent into small child chunks, tagging every child with
    # its parent's id so the retriever can map vector hits back to the
    # full parent chunk.
    sub_docs = []
    for doc, doc_id in zip(docs, doc_ids):
        children = child_text_splitter.split_documents([doc])
        for child in children:
            child.metadata[id_key] = doc_id
        sub_docs.extend(children)

    # Embed/index the child chunks, then store the parent chunks by id.
    ids = retriever.vectorstore.add_documents(sub_docs)
    retriever.docstore.mset(list(zip(doc_ids, docs)))
    print(ids)
