import sentence_transformers
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from chatglm_loader import ChatGLM


# Short aliases -> HuggingFace model ids (or local paths) for the
# embedding backends this script can use.
embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec": "C:\\Work\\llm\\text2vec-large-chinese",
    "text2vec2": "uer/sbert-base-chinese-nli",
    "text2vec3": "shibing624/text2vec-base-chinese",
}

# Which alias from embedding_model_dict to load.
EMBEDDING_MODEL = "text2vec"

# Initialize the HuggingFace embeddings object.
# Pass the device through model_kwargs rather than replacing
# `embeddings.client` after construction: the previous code built the
# SentenceTransformer model twice (once inside HuggingFaceEmbeddings,
# once manually), doubling model-load time and memory for no benefit.
embeddings = HuggingFaceEmbeddings(
    model_name=embedding_model_dict[EMBEDDING_MODEL],
    model_kwargs={"device": "cpu"},
)

# Open the Chroma collection previously persisted at this path; queries
# against it will embed text with the `embeddings` object above.
db = Chroma(persist_directory="./chroma/news_test", embedding_function=embeddings)

# Sanity check: run a raw similarity search against the vector store
# (uncomment the lines below to inspect the top-k retrieved documents).
# question = "2022年腾讯营收多少"
# similarDocs = db.similarity_search(question, include_metadata=True,k=4)
# [print(x) for x in similarDocs]


# Load the local ChatGLM model and wire it into a RetrievalQA chain
# (uncomment the lines below to run end-to-end question answering).
# model = ChatGLM()
# model.load_model("C:\\Work\\llm\\ChatGLM2-6B\\THUDM\\chatglm2-6b")
#
# retriever = db.as_retriever()
# qa = RetrievalQA.from_chain_type(llm=model, chain_type="stuff", retriever=retriever)
#
# # 进行问答
# query = "2022年腾讯营收多少"
# print(qa.run(query))
#
#
# query = "2022年底腾讯有多少员工"
# print(qa.run(query))
#
#
# query = "2022年腾讯离职员工数量"
# print(qa.run(query))
