import os

from langchain import hub
from langchain.agents import AgentExecutor, create_structured_chat_agent, tool
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import Chroma
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain_openai import OpenAIEmbeddings
#设置one-api
ONEAPI_API_BASE = "http://localhost:3000/v1"
ONEAPI_CHAT_API_KEY = "sk-oeDSXZfGcYIfESrY11F10080Fe404f178e597bDfBa9aFa58"
ONEAPI_CHAT_MODEL = "qwen-plus"
ONEAPI_EMBEDDING_API_KEY = "sk-oeDSXZfGcYIfESrY11F10080Fe404f178e597bDfBa9aFa58"
ONEAPI_EMBEDDING_MODEL = "text-embedding-v1"


class Master:

    def __init__(self):
        #初始化ChatOpenAI
        self.llm = ChatOpenAI(
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_CHAT_API_KEY,
            model=ONEAPI_CHAT_MODEL,  # 本次使用的模型
            temperature=0,  # 发散的程度，一般为0
            timeout=None,  # 服务请求超时
            max_retries=2,  # 失败重试最大次数
        )
        self.embedding_model = OpenAIEmbeddings(
            model=ONEAPI_EMBEDDING_MODEL,
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_EMBEDDING_API_KEY,
            deployment=ONEAPI_EMBEDDING_MODEL)
        #设置记忆存储键名
        self.memory_key = "chat_history_test"
        #设置系统提示词模板
        self.messages = [
            SystemMessagePromptTemplate.from_template(
                "你是一位得力的ai助手，可以协助用户处理问题，同时对于历史提过的问题以及回答，同样可以快读的输出"),
            MessagesPlaceholder(variable_name=self.memory_key),
            HumanMessagePromptTemplate.from_template('{input_info}')
        ]
        self.prompt = ChatPromptTemplate.from_messages(messages=self.messages)
        self.memory = ConversationBufferMemory(memory_key=self.memory_key,
                                               return_messages=True,
                                               llm=self.llm)

    def llm_chain(self):
        chain = LLMChain(llm=self.llm, prompt=self.prompt, memory=self.memory)
        return chain

    # def retriever_chain(self):
    #     Chroma.from_documents(documents="", embedding=self.embedding_model)
