from customize.get_ollama import GetOllama

# Chat LLM served by a local Ollama instance. All connection details
# (host, model name) come from the GetOllama wrapper's defaults;
# model_type=1 presumably selects the chat-LLM mode (vs. embedding mode)
# — TODO confirm against customize.get_ollama.
# NOTE(review): removed dead commented-out code (raw chromadb experiments
# and alternate Ollama configs) that this script no longer uses.
llm = GetOllama(model_type=1)()
from langchain_chroma import Chroma
#from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
# from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from customize.langchain_customized_huggingface import HuggingFaceBgeEmbeddings


# Local BGE embedding model (English, large variant). A Chinese variant
# (bge-large-zh-v1.5) can be swapped in via the same path convention.
model_name = "F:/models/BAAI/bge-large-en-v1.5"
model_kwargs = {"device": "cuda"}
# Normalizing embeddings makes inner-product search equivalent to
# cosine similarity.
encode_kwargs = {"normalize_embeddings": True}

embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    # The customized wrapper also takes an explicit weights path; here it
    # is the same local directory as model_name.
    model_path=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# Load the source document and split it into ~1000-character chunks
# (no overlap between consecutive chunks).
loader = TextLoader('resources/state_of_the_union.txt', encoding='utf-8')
documents = loader.load()

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=0,
)
texts = text_splitter.split_documents(documents)

# Embed and store the texts.
# Supplying a persist_directory stores the embeddings on disk so they
# survive across runs.
persist_directory = 'db'

vectordb = Chroma(embedding_function=embedding, collection_name="mytest", persist_directory=persist_directory)

# BUG FIX: the split chunks were never indexed — the original
# Chroma.from_documents(...) call was commented out, and Chroma(...)
# only opens/creates the collection, so retrieval ran against an empty
# (or stale) store. Index the chunks once; skip when the persisted
# collection already has entries so reruns don't duplicate documents.
if not vectordb.get(limit=1)["ids"]:
    vectordb.add_documents(texts)

retri = vectordb.as_retriever()

# "stuff" chain: all retrieved chunks are stuffed into a single prompt.
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retri)

# Ask a retrieval-augmented question against the indexed document and
# print the chain's full response dict.
query = "What did the president say about Ketanji Brown Jackson"
answer = qa.invoke(query)
print(answer)

# To clean up, delete the collection:
# vectordb.delete_collection()
