from langchain_community.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
from langchain.agents import create_structured_chat_agent, AgentExecutor, tool
from langchain.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
import os

# one-api gateway configuration. Every value can be overridden via an
# environment variable of the same name; the literals below are only
# fallback defaults, so existing behavior is unchanged when no env is set.
# NOTE(review): the API key is committed in plain text — rotate it and set
# ONEAPI_CHAT_API_KEY / ONEAPI_EMBEDDING_API_KEY in the environment instead.
ONEAPI_API_BASE = os.environ.get("ONEAPI_API_BASE", "http://localhost:3000/v1")
ONEAPI_CHAT_API_KEY = os.environ.get(
    "ONEAPI_CHAT_API_KEY",
    "sk-4Rqi5cIas3Rg5cwh59231eB0B82c42E7AdAe9a17CcE29591")
ONEAPI_CHAT_MODEL = os.environ.get("ONEAPI_CHAT_MODEL", "qwen-plus")
ONEAPI_EMBEDDING_API_KEY = os.environ.get(
    "ONEAPI_EMBEDDING_API_KEY",
    "sk-4Rqi5cIas3Rg5cwh59231eB0B82c42E7AdAe9a17CcE29591")
ONEAPI_EMBEDDING_MODEL = os.environ.get("ONEAPI_EMBEDDING_MODEL",
                                        "text-embedding-v1")


class Agent_Master:
    """Wires together the chat model, embeddings, prompt and conversation
    memory used by the agent.

    All components are built once as class attributes and therefore shared by
    every instance — this mirrors the original module-level configuration
    style and keeps the external interface (``llm``, ``embedding_model``,
    ``memory_key``, ``messages``, ``prompt``, ``memory``) unchanged.
    """

    # Chat model served through the one-api gateway.
    llm = ChatOpenAI(
        base_url=ONEAPI_API_BASE,
        api_key=ONEAPI_CHAT_API_KEY,
        model=ONEAPI_CHAT_MODEL,  # model used for this session
        temperature=0,  # degree of randomness; 0 = deterministic
        timeout=None,  # request timeout (None = library default)
        max_retries=2,  # maximum number of retries on failure
    )

    # Embedding model, also routed through the one-api gateway.
    embedding_model = OpenAIEmbeddings(
        model=ONEAPI_EMBEDDING_MODEL,
        base_url=ONEAPI_API_BASE,
        api_key=ONEAPI_EMBEDDING_API_KEY,
        deployment=ONEAPI_EMBEDDING_MODEL)

    # Key under which the conversation history is stored in memory; the
    # MessagesPlaceholder below must use the same name.
    memory_key = "chat_history_test"

    # Prompt layout: system instructions, then the conversation history,
    # then the current user input (filled via the `input_info` variable).
    messages = [
        SystemMessagePromptTemplate.from_template(
            "你是一位得力的ai助手，可以协助用户处理问题，同时对于历史提过的问题以及回答，同样可以快读的输出"),
        MessagesPlaceholder(variable_name=memory_key),
        HumanMessagePromptTemplate.from_template('{input_info}')
    ]
    prompt = ChatPromptTemplate.from_messages(messages=messages)

    # FIX: ConversationBufferMemory declares no `llm` field (that parameter
    # belongs to ConversationSummaryMemory). Passing `llm=llm` is at best
    # silently ignored and at worst a pydantic validation error, so the
    # kwarg is dropped; buffer behavior is unaffected.
    memory = ConversationBufferMemory(memory_key=memory_key,
                                      return_messages=True)