import os

# Route all Hugging Face Hub traffic through the mirror. This MUST be set
# before any huggingface/transformers machinery is imported, which is why
# it sits between the stdlib import and the third-party imports.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter

from customize.langchain_customized_huggingface import HuggingFaceBgeEmbeddings

# --- Embedding model configuration --------------------------------------
# Local checkpoint of BAAI's bge-large English embedding model.
model_name = "F:/models/BAAI/bge-large-en-v1.5"
# model_name = "BAAI/bge-large-zh-v1.5"  # swap in for Chinese corpora

# Encode on the GPU. Normalizing the output vectors means a plain inner
# product between them behaves as cosine similarity downstream.
model_kwargs = {'device': 'cuda'}
encode_kwargs = {'normalize_embeddings': True}

embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_path=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    # Optional BGE query-side instruction prefix (used by the zh models):
    # query_instruction="为这个句子生成表示以用于检索相关文章："
)


# --- Corpus loading and chunking -----------------------------------------
# Read the demo corpus from disk (UTF-8 text) and split it into chunks whose
# size is measured in tiktoken tokens: 100-token windows with a 50-token
# overlap, so adjacent chunks share context for retrieval.
loader = TextLoader('resources/state_of_the_union.txt', encoding='utf-8')
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=100, chunk_overlap=50
)
doc_splits = text_splitter.split_documents(documents)

# --- Vector store and retriever ------------------------------------------
# Embed every chunk and index it in a Chroma collection, then expose the
# store through LangChain's standard retriever interface for RAG chains.
vectorstore = Chroma.from_documents(
    doc_splits,
    embedding=embeddings,
    collection_name="rag-chroma",
)
retriever = vectorstore.as_retriever()