import asyncio
import threading
import time

from langchain_community.callbacks import get_openai_callback
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import ChatMessage
from loguru import logger

from ai_engine.car_wrap.chain.extraction_chain import ExtractionChain
from ai_engine.core.base.base_service import BaseService
from ai_engine.core.callbacks.chain_stream_handler import QAChainStreamHandler
from ai_engine.core.model import result
from ai_engine.core.model.chat import ExtractionRequest
from ai_engine.utils.param_util import ParamUtil


class ExtractionService(BaseService):
    """Service that performs entity extraction over a chat context via an LLM chain."""

    def extraction(self, request: ExtractionRequest):
        """Perform entity extraction based on the chat context.

        Runs the extraction chain either synchronously (returning a result
        payload) or in streaming mode (returning a token generator fed by a
        background thread).

        Args:
            request: Extraction request carrying the prompt, chat history,
                sampling settings and the ``stream`` flag.

        Returns:
            A token generator when ``request.stream`` is true, otherwise a
            ``result.success(...)`` payload with the extracted answer.

        Raises:
            ValueError: If the request fails validation.
        """
        start_time = time.time()

        # Validate input parameters before doing any model work.
        self._extraction_validate(request)

        # Streaming mode wires a callback handler into the LLM so tokens can
        # be consumed incrementally by the returned generator.
        chain_stream_handler = None
        callback_manager = None
        if request.stream:
            chain_stream_handler = QAChainStreamHandler(method_name="实体抽取", request_id=request.request_id)
            callback_manager = CallbackManager([chain_stream_handler])

        # Build the answering language model.
        llm = self.get_llm(
            temperature=request.temperature,
            stream=request.stream,
            verbose=True,
            max_tokens=request.max_tokens,
            callback_manager=callback_manager,
        )

        # Work on a copy so the caller's request.chat_history is not mutated
        # (the original code appended directly to the request object).
        chat_history = list(request.chat_history)
        chat_history.append(ChatMessage(role="human", content=request.prompt))
        chain_parameter = {
            "question": request.prompt,
            "chat_history": ParamUtil.get_chat_history_str(chat_history),
        }

        # Entity-extraction chain.
        chain = ExtractionChain(llm=llm, request=request)

        if request.stream:
            # Run the chain in a daemon thread and hand back the token
            # generator immediately. This replaces the original pattern of
            # creating (and leaking) a fresh asyncio event loop per request
            # solely to call run_in_executor.
            threading.Thread(
                target=chain.invoke, args=(chain_parameter,), daemon=True
            ).start()
            return chain_stream_handler.generate_tokens()

        with get_openai_callback() as cb:
            response = chain.invoke(chain_parameter)
            # Strip Markdown code fences the model may wrap around the JSON.
            answer = response["answer"].replace("```json", "").replace("```", "")
            res = result.success(answer)

            end_time = time.time()
            logger.info("实体抽取同步运行时间(" + request.request_id + ")：%.2f秒" % (end_time - start_time))
            logger.info("实体抽取同步应答内容(" + request.request_id + ")：" + answer)
            logger.info("实体抽取同步token消耗(" + request.request_id + ")：" + str(cb))
            return res

    def _extraction_validate(self, request: ExtractionRequest):
        """Validate the extraction request; raise ``ValueError`` on bad input.

        Checks that ``model_name`` and ``prompt`` are non-empty and that every
        chat-history entry has a supported role and non-empty content.
        """
        if not request.model_name:
            raise ValueError("model_name error,Need to provide.")
        if not request.prompt:
            raise ValueError("prompt error,Need to provide.")
        for chat in request.chat_history:
            if chat.role not in ("human", "assistant"):
                raise ValueError("chat_history role error,Only supports human or assistant.")
            if not chat.content:
                raise ValueError("chat content error,Need to provide.")
