from langchain.docstore.in_memory import InMemoryDocstore
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS, dependable_faiss_import

# Source documents for the retriever demo.
loaders = [
    TextLoader("files/paul_graham_essay.txt"),
    TextLoader("files/state_of_the_union.txt"),
]
# Flatten every loader's output into one list of Documents.
docs = [doc for loader in loaders for doc in loader.load()]

# Children (the vectors actually searched) are small 400-char chunks.
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

# BUG FIX: the vectorstore must start EMPTY. The original seeded it with the
# full parent documents (FAISS.from_documents(docs, ...)), so similarity
# search mixed whole essays in with the 400-char child chunks that
# ParentDocumentRetriever.add_documents() indexes — defeating the
# "search small chunks, return big documents" design.
embeddings = OpenAIEmbeddings()
faiss = dependable_faiss_import()
# Probe the embedding model once to learn its output dimensionality
# (avoids hard-coding a model-specific constant like 1536).
embedding_dim = len(embeddings.embed_query("dimension probe"))
vector_store = FAISS(
    embedding_function=embeddings.embed_query,
    index=faiss.IndexFlatL2(embedding_dim),
    docstore=InMemoryDocstore({}),
    index_to_docstore_id={},
)

# Parent documents live here, keyed by generated ids; the child chunks in
# the vectorstore carry those ids in their metadata.
store = InMemoryStore()
retriever = ParentDocumentRetriever(
    vectorstore=vector_store,
    docstore=store,
    child_splitter=child_splitter,
)
# Splits docs into children, embeds the children, stores the parents.
# ids=None lets the retriever generate parent ids itself.
retriever.add_documents(docs, ids=None)
print(list(store.yield_keys()))

# Direct vector search returns the small child chunks...
sub_docs = vector_store.similarity_search("justice breyer")
if sub_docs:
    print(sub_docs[0].page_content)

# ...while the retriever maps matching children back to their full parents.
retrieved_docs = retriever.get_relevant_documents("justice breyer")
if retrieved_docs:
    # BUG FIX: the original evaluated len(...) as a bare expression and
    # discarded the result (a notebook leftover); print it so the parent
    # document's size is actually visible.
    print(len(retrieved_docs[0].page_content))
