#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @Project : RAG-demo
# @File    : llm_util.py
# @IDE     : PyCharm
# @Author  :ZH
# @Time    : 2025/6/27 16:23
from typing import List

from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

from util.schema import MessageSchema


class LLMUtil:
    """Wrapper around a LangChain ``ChatOpenAI`` client.

    Supports one-shot (:meth:`invoke`) and streaming (:meth:`chat_stream`)
    chat completions, with optional conversation history, a system prompt,
    and retrieved documents injected via a RAG prompt template.
    """

    def __init__(self, base_url: str, model: str, api_key: str, temperature=0.7, max_tokens=1024):
        """
        Initializes the LLMUtil with the specified parameters.
        :param base_url:
            The base URL for the LLM API.
        :param model:
            The model to be used for the LLM.
        :param api_key:
            The API key for authentication with the LLM service.
        :param temperature:
            Controls the randomness of the model's output. Higher values (e.g., 0.7) make the output more random, while lower values (e.g., 0.2) make it more focused and deterministic.
        :param max_tokens:
            The maximum number of tokens to generate in the response. This limits the length of the output.
        """
        self.llm = ChatOpenAI(
            base_url=base_url,
            model=model,
            openai_api_key=api_key,
            max_tokens=max_tokens,
            temperature=temperature,
            stream_usage=True  # also record token usage for streaming responses
        )

        # RAG prompt template; filled in by __create_message when documents are supplied.
        # (Fixed wording: was "Use keep the answer concise".)
        self.documents_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Keep the answer concise.
Question: {question} 
Context: {context} 
Answer:"""

    def __load_history_message(self, history: List[MessageSchema]) -> list:
        """Flatten conversation history into alternating Human/AI messages,
        preserving chronological order."""
        messages = []
        for item in history:
            messages.append(self.__create_human_message(content=item.human_message))
            messages.append(self.__create_ai_message(item.ai_message))
        return messages

    async def invoke(
            self, question: str, history: List[MessageSchema] = None, system_prompt: str = None,
            documents: List[str] = None
    ):
        """
        Invokes the LLM with the provided question, history, system prompt, and documents.
        :param question:
            The question to be asked to the LLM.
        :param history:
            A list of previous messages in the conversation history.
        :param system_prompt:
            A system message that sets the context or rules for the LLM's responses.
        :param documents:
            A list of documents or context that can be used to answer the question.
        :return:
            The model's complete response message (an ``AIMessage``).
        """
        messages = self.__create_message(
            question=question, history=history, system_prompt=system_prompt, documents=documents
        )

        return await self.llm.ainvoke(messages)

    async def chat_stream(
            self, question: str, history: List[MessageSchema] = None, system_prompt: str = None,
            documents: List[str] = None
    ):
        """
        Streams the response from the LLM for the given question and context.
        :param question:
            The question to be asked to the LLM.
        :param history:
            A list of previous messages in the conversation history.
        :param system_prompt:
            A system message that sets the context or rules for the LLM's responses.
        :param documents:
            A list of documents or context that can be used to answer the question.
        :return:
            An async generator yielding response chunks as they arrive.
        """
        messages = self.__create_message(
            question=question, history=history, system_prompt=system_prompt, documents=documents
        )

        async for message in self.llm.astream(messages):
            yield message

    def __create_message(
            self, question: str, history: List[MessageSchema] = None,
            system_prompt: str = None, documents: List[str] = None
    ) -> List[SystemMessage | HumanMessage | AIMessage]:
        """
        Creates a list of messages to be sent to the LLM based on the provided parameters.
        Order: system prompt (if any) -> history (if any) -> current question.
        :param question:
            The user's question; rewritten via the RAG template when documents are given.
        :param history:
            Prior human/AI message pairs, oldest first.
        :param system_prompt:
            Optional system instruction prepended to the conversation.
        :param documents:
            Optional retrieved context; a list is numbered and joined line by line.
        :return:
            The ordered message list ready for ``ainvoke``/``astream``.
        """
        messages = []
        if system_prompt:
            messages.append(self.__create_system_message(content=system_prompt))
        if documents:
            if isinstance(documents, list):
                # Number each snippet so the model can reference them distinctly.
                documents = "\n".join([f'{index + 1}. {value}' for index, value in enumerate(documents)])
            question = self.documents_prompt.format(question=question, context=documents)
        if history:
            messages.extend(self.__load_history_message(history))
        messages.append(self.__create_human_message(content=question))
        return messages

    def __create_system_message(self, content: str) -> SystemMessage:
        """Wrap plain text in a ``SystemMessage``."""
        return SystemMessage(content=content)

    def __create_human_message(self, content: str) -> HumanMessage:
        """Wrap plain text in a ``HumanMessage``."""
        return HumanMessage(content=content)

    def __create_ai_message(self, content: str) -> AIMessage:
        """Wrap plain text in an ``AIMessage``."""
        return AIMessage(content=content)
