from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, \
    AIMessagePromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.llms.chatglm import ChatGLM
from langchain.chains.llm import LLMChain
from utils import LOG


class TranslationChain:
    """Wraps a LangChain LLMChain that translates text between two languages.

    The chain pairs a system prompt (declaring the source/target language
    pair) with a human message carrying the text, backed by a locally
    hosted ChatGLM endpoint.
    """

    def __init__(self, verbose: bool = True):
        """Assemble the prompt template and model into `self.chain`.

        Args:
            verbose: Forwarded to LLMChain; when True, LangChain logs the
                rendered prompts for debugging.
        """
        # System prompt fixes the translation direction via template variables.
        template = """You are a translation expert, proficient in various languages. \n
            Translates {source_language} to {target_language}."""

        system_message_prompt = SystemMessagePromptTemplate.from_template(template)
        # The human message carries the text to translate, verbatim.
        human_message_prompt = HumanMessagePromptTemplate.from_template('{text}')

        chat_prompt = ChatPromptTemplate.from_messages(
            [system_message_prompt, human_message_prompt]
        )

        # Locally hosted ChatGLM server; any LangChain chat model with the
        # same interface could be substituted here.
        chat_model = ChatGLM(
            endpoint_url='http://127.0.0.1:8000'
        )

        self.chain = LLMChain(llm=chat_model, prompt=chat_prompt, verbose=verbose)

    def run(self, text: str, source_language: str, target_language: str) -> tuple[str, bool]:
        """Translate `text` from `source_language` to `target_language`.

        Returns:
            (translation, True) on success, or ('', False) if the chain
            raised — errors are logged rather than propagated so callers
            can treat translation as best-effort.
        """
        try:
            result = self.chain.invoke({
                "text": text,
                "source_language": source_language,
                "target_language": target_language,
            })['text']
        except Exception as e:
            # Boundary handler: log and signal failure instead of raising.
            LOG.error(f"An error occurred during translation: {e}")
            return '', False

        return result, True