from langchain.document_loaders.base import BaseLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import SQLRecordManager, index
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.elasticsearch import ElasticsearchStore

# Name of the Elasticsearch index used throughout this demo; also used below to
# derive the record manager's namespace.
collection_name = "test_index"
# NOTE(review): requires OPENAI_API_KEY in the environment — confirm before running.
embedding = OpenAIEmbeddings()

# Vector store backed by a local Elasticsearch instance.
vectorstore = ElasticsearchStore(
    es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
# Namespace convention "<store kind>/<collection>", so one SQLite cache can
# track records for multiple stores without key collisions.
namespace = f"elasticsearch/{collection_name}"
# The record manager keeps a ledger of indexed content in a local SQLite file,
# which is what enables de-duplication and the cleanup modes demonstrated below.
record_manager = SQLRecordManager(
    namespace, db_url="sqlite:///record_manager_cache.sql"
)
# Create the ledger tables before first use.
record_manager.create_schema()
# Two toy documents; the `source` metadata key drives incremental/full cleanup.
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})


def _clear():
    """Wipe all previously indexed content.

    Indexing an empty batch with cleanup="full" makes the indexing API delete
    every record not present in the batch — which is all of them.
    """
    no_docs = []
    index(no_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")


_clear()

# cleanup=None performs no cleanup of old content, but still de-duplicates:
# the five identical copies of doc1 collapse to a single indexed record.
index(
    [doc1, doc1, doc1, doc1, doc1],
    record_manager,
    vectorstore,
    cleanup=None,
    source_id_key="source",
)

_clear()
# None mode does not automatically clean up old versions of content; however,
# it is still responsible for de-duplicating content.
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")

# Re-running the same batch: both documents are already recorded and skipped.
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")

_clear()
# "incremental" cleanup mode: indexing again should cause both documents to be
# skipped — also skipping the embedding operation!
index(
    [doc1, doc2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)

# If we change a document, the new version is written and all old versions
# sharing the same source are deleted.
changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"})
index(
    [changed_doc_2],
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)

# In "full" mode the user should pass the complete universe of content to be
# indexed. Any document that exists in the vector store but is not passed in
# will be deleted! This behavior is useful for handling deletions of source
# documents.
_clear()
all_docs = [doc1, doc2]

index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")

# Suppose someone deleted the first document:
del all_docs[0]
# Using full mode also purges the deleted content from the store.
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")

# If documents with the same source (the `source` field of Document.metadata)
# already exist, "incremental" mode deletes the old versions for that source.
_clear()
doc1 = Document(
    page_content="kitty kitty kitty kitty kitty", metadata={"source": "kitty.txt"}
)
doc2 = Document(page_content="doggy doggy the doggy", metadata={"source": "doggy.txt"})

# Split into tiny chunks; each chunk inherits its parent's `source` metadata.
new_docs = CharacterTextSplitter(
    separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2
).split_documents([doc1, doc2])
index(
    new_docs,
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)
changed_doggy_docs = [
    Document(page_content="woof woof", metadata={"source": "doggy.txt"}),
    Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}),
]
# This deletes both old chunks for the doggy.txt source before writing new ones.
index(
    changed_doggy_docs,
    record_manager,
    vectorstore,
    cleanup="incremental",
    source_id_key="source",
)

# Custom document loader


class MyCustomLoader(BaseLoader):
    """Demo loader that yields pre-split chunks of two "doggy.txt" documents."""

    def lazy_load(self):
        """Lazily yield chunks split on "t" with a 12-char window and 2-char overlap."""
        splitter = CharacterTextSplitter(
            separator="t", keep_separator=True, chunk_size=12, chunk_overlap=2
        )
        raw_docs = [
            Document(page_content="woof woof", metadata={"source": "doggy.txt"}),
            Document(page_content="woof woof woof", metadata={"source": "doggy.txt"}),
        ]
        for chunk in splitter.split_documents(raw_docs):
            yield chunk

    def load(self):
        """Eagerly materialize every chunk produced by lazy_load()."""
        return [*self.lazy_load()]


_clear()

loader = MyCustomLoader()

# NOTE(review): this eager load's result is discarded — presumably a leftover
# notebook cell that displayed its output; `index` below consumes the loader
# directly via lazy_load. Confirm whether this call can be removed.
loader.load()
# `index` accepts a loader as its document source; "full" cleanup keeps the
# store in sync with exactly what the loader yields.
index(loader, record_manager, vectorstore, cleanup="full", source_id_key="source")
# Return value (up to 30 nearest chunks for "dog") is also discarded — in the
# original notebook this line displayed the search results.
vectorstore.similarity_search("dog", k=30)