import uuid

import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.document_loaders import TextLoader
from chinese_recursive_text_splitter import ChineseRecursiveTextSplitter

# Configuration for the BGE Chinese embedding model, run locally on CPU.
model_name = "BAAI/bge-large-zh"
model_kwargs = {"device": "cpu"}
# Normalized (unit-length) embeddings let a dot product be read directly
# as cosine similarity.
encode_kwargs = {"normalize_embeddings": True}

# LangChain-side embedding wrapper (used by the commented-out Chroma wrapper below).
embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# Load the source novel. The file is Chinese text, so pin the encoding
# explicitly instead of relying on the platform default locale codec
# (e.g. GBK on Chinese-locale Windows), which would raise UnicodeDecodeError.
loader = TextLoader('./text/sanguoyanyi.txt', encoding='utf-8')
data = loader.load()

# Embedding function handed to the Chroma collection itself, so the server
# embeds queries with the same model used for the stored documents.
sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(
    model_name="BAAI/bge-large-zh"
)

# Split on Chinese sentence boundaries into 384-character chunks with no
# overlap between consecutive chunks.
text_splitter = ChineseRecursiveTextSplitter(chunk_size=384, chunk_overlap=0)
docs = text_splitter.split_documents(data)

# Connect to a running Chroma server (default: localhost:8000).
client = chromadb.HttpClient(settings=Settings(allow_reset=False))
# client.reset()  # resets the database

# get_or_create avoids the ValueError that create_collection raises when the
# collection already exists, so the script can be re-run safely.
collection = client.get_or_create_collection(
    "sanguo_collection_3", embedding_function=sentence_transformer_ef
)

# Insert all chunks in one batched call: a single HTTP round-trip (and a single
# embedding batch) instead of one per chunk. uuid4 gives opaque random ids;
# uuid1 would leak the host MAC address and timestamp into the stored ids.
if docs:
    print(f'adding {len(docs)} chunks')
    collection.add(
        ids=[str(uuid.uuid4()) for _ in docs],
        metadatas=[doc.metadata for doc in docs],
        documents=[doc.page_content for doc in docs],
    )
print('docs added to DB')
# tell LangChain to use our client and collection name
# db4 = Chroma(
#     client=client,
#     collection_name="sanguo_collection_3",
#     embedding_function=embeddings,
# )
# query = "三顾茅庐讲的是什么故事？"  # example query matching the sanguoyanyi corpus
# docs = db4.similarity_search(query)
# print(docs[0].page_content)