from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings

from langchain.memory import ConversationBufferMemory

from langchain_community.llms import Tongyi
import os
from langchain.chains import ConversationalRetrievalChain
# 加载数据库 persist_directory:数据库路径
def load_vectordb(persist_directory):
    """Open a persisted Chroma vector store.

    Args:
        persist_directory: Path to the on-disk Chroma database.

    Returns:
        A Chroma vector store wired to an m3e-base HuggingFace embedder.
    """
    # Embedding model must match the one used when the DB was built.
    embedder = HuggingFaceEmbeddings(model_name="m3e-base")
    return Chroma(
        persist_directory=persist_directory,
        embedding_function=embedder,
    )

def create_memory():
    """Build a conversation buffer memory for the retrieval chain.

    Returns:
        A ConversationBufferMemory keyed as "chat_history" (must match the
        prompt's input variable) that yields history as a message list
        rather than a single concatenated string.
    """
    return ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )

def con_llm():
    """Construct the Tongyi (Qwen / DashScope) LLM client.

    The DashScope API key is read from the DASHSCOPE_API_KEY environment
    variable instead of being hard-coded in source (the previous placeholder
    'sk-***' both leaked a secret pattern into the repo and clobbered any
    real key the user had already exported).

    Returns:
        A Tongyi LLM instance.

    Raises:
        EnvironmentError: If DASHSCOPE_API_KEY is not set.
    """
    if not os.environ.get("DASHSCOPE_API_KEY"):
        raise EnvironmentError(
            "DASHSCOPE_API_KEY is not set; export it before starting the app."
        )
    # Tongyi picks the key up from the environment automatically.
    llm = Tongyi()
    return llm


# Answer one chat turn with a memory-backed retrieval QA chain.
def chatqwen_chat(message, history):
    """Answer a user message using the conversational retrieval chain.

    The chain (LLM + retriever + memory) is built lazily on the first call
    and cached on the function object. The original code rebuilt everything
    on every call, which silently reset ConversationBufferMemory each turn
    so the "conversation" never actually remembered anything.

    Args:
        message: The user's question for this turn.
        history: Chat history supplied by the UI framework; unused here
            because the chain keeps its own memory.

    Returns:
        The chain's answer string for this turn.
    """
    if not hasattr(chatqwen_chat, "_qa"):
        chatqwen_chat._qa = ConversationalRetrievalChain.from_llm(
            con_llm(),
            retriever=load_vectordb('data_base/chroma').as_retriever(),
            memory=create_memory(),
            verbose=True,
        )
    result = chatqwen_chat._qa({"question": message})
    return result['answer']