import os
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
from langchain_huggingface import HuggingFaceEmbeddings
from transformers import AutoModel, AutoTokenizer


from langchain.chains.retrieval_qa.base import VectorDBQA, RetrievalQA
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
import numpy as np

# Initialise the chat LLM (Moonshot's OpenAI-compatible endpoint).
# The API key is read from the environment so secrets are not hard-coded
# in source; falls back to the original placeholder for compatibility.
llm = ChatOpenAI(
    openai_api_key=os.environ.get("MOONSHOT_API_KEY", "key"),
    openai_api_base="https://api.moonshot.cn/v1",
    model="moonshot-v1-8k",
    temperature=0,        # deterministic output for QA
    request_timeout=60,   # seconds before a request is abandoned
    max_retries=3,
)

# Load every .txt file found (recursively) under the parent directory.
loader = DirectoryLoader("../", glob='**/*.txt')
documents = loader.load()

# Split the loaded documents into 200-character chunks with no overlap.
# NOTE(review): split_docs is only consumed by the (currently disabled)
# one-off indexing step below — confirm it is still needed on this path.
splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0)
split_docs = splitter.split_documents(documents)

# Embedding model used both to index and to query the vector store.
# (An earlier revision built the model/tokenizer manually via transformers'
# AutoModel/AutoTokenizer; HuggingFaceEmbeddings downloads and manages the
# model itself, so that commented-out setup was removed as dead code.)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")


# Load the previously persisted Chroma vector store from disk.
# Indexing is a separate one-off step — re-run
#   Chroma.from_documents(split_docs, embeddings, persist_directory="../persist_data")
# whenever the source documents change. (The commented-out copy of that call
# that used to live here was removed as dead code.)
docsearch = Chroma(persist_directory="../persist_data", embedding_function=embeddings)
# Build a retrieval-augmented QA chain over the persisted vector store.
# chain_type="refine" iteratively improves the answer across retrieved
# chunks; return_source_documents exposes the supporting chunks to callers.
retriever = docsearch.as_retriever()
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="refine",
    retriever=retriever,
    return_source_documents=True,
)

# Ask a question against the indexed documents and print the answer together
# with its source chunks. (Query: "Who is the main person described in this
# news article?")
question = "这个新闻的主要描述人是谁？"
result = qa.invoke({"query": question})
print(result)