#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: zyx
@Date: 2024/11/29 12:01
@FileName: qa.py
@Description: 问答系统
"""
import os
from dotenv import load_dotenv, find_dotenv
from prompts import SYSTEM_TPL
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import create_tool_calling_agent, AgentExecutor
from tools import search_tool, get_rag_tool_by_type
from typing import Literal
from memory import get_memory_by_type
from langchain_core.runnables.history import RunnableWithMessageHistory

class ChemQA:
    """Agent-based chemistry question-answering system.

    Combines a RAG retrieval tool, a web-search tool, and per-session
    conversational memory behind a tool-calling agent.
    """

    def __init__(
        self,
        rag_dir: str,
        rag_type: Literal["vector_query", "lightrag"] = "vector_query",
        memory_type: Literal["memory", "redis"] = "memory",
    ) -> None:
        """Build the tool-calling agent pipeline.

        Args:
            rag_dir: Directory holding the documents/index used by the RAG tool.
            rag_type: Which RAG backend to retrieve with.
            memory_type: Chat-history backend ("memory" = in-process,
                "redis" = external store).
        """
        # Load .env so BASE_CHAT_LLM (and presumably API keys) are available.
        load_dotenv(find_dotenv())
        self.chatmodel = ChatOpenAI(
            model=os.environ["BASE_CHAT_LLM"],
            temperature=0,  # deterministic answers for QA
        )
        self.tools = [
            get_rag_tool_by_type(rag_type, rag_dir),
            search_tool,
        ]
        # Single shared history factory. BUG FIX: the original called
        # get_memory_by_type() a second time below, so RunnableWithMessageHistory
        # used a separate instance and self.memory was dead / out of sync.
        self.memory = get_memory_by_type(memory_type)
        self.prompt = ChatPromptTemplate.from_messages(
            [
                ("system", SYSTEM_TPL),
                MessagesPlaceholder(variable_name="chat_history"),
                ("user", "{input}"),
                # Scratchpad slot the agent uses for intermediate tool calls.
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        self.agent = create_tool_calling_agent(
            self.chatmodel,
            self.tools,
            self.prompt,
        )
        self.agent_executor = AgentExecutor(
            agent=self.agent, tools=self.tools, verbose=True
        )
        self.runnable_chain = RunnableWithMessageHistory(
            self.agent_executor,
            self.memory,  # reuse the shared history factory created above
            input_messages_key="input",
            history_messages_key="chat_history",
        )

    def answer(self, query: str, session_id: str) -> str:
        """Answer *query* within the conversation identified by *session_id*.

        Args:
            query: The user's question.
            session_id: Key selecting which chat history to read/append.

        Returns:
            The agent's final answer text (the "output" field of the result).
        """
        response = self.runnable_chain.invoke(
            {"input": query},
            config={"configurable": {"session_id": session_id}},
        )
        return response["output"]
