import asyncio
import threading
import time
from typing import List

from langchain.chains import LLMChain
from langchain_community.callbacks import get_openai_callback
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, HumanMessage
from loguru import logger

from llmchat.model import ApiResponse
from llmchat.model.chat import CompletionRequest
from llmchat.prompt import CHAT_COMPLETION_PROMPT
from llmchat.service.chat.BaseService import BaseService
from llmchat.stream.ChainStreamHandler import ChainStreamHandler


class ChatOpenAIService(BaseService):
    """OpenAI chat-completion service.

    Builds an ``LLMChain`` around the configured OpenAI model and runs it
    either synchronously (returning an ``ApiResponse``) or in streaming
    mode (returning a token generator fed by a callback handler).
    """

    def completions(self, request: CompletionRequest):
        """Generate a chat completion for *request*.

        Args:
            request: Completion parameters — model name, prompt, chat
                history, temperature, max_tokens, ``stream`` flag and
                ``requestId``.

        Returns:
            A token generator (from ``ChainStreamHandler``) when
            ``request.stream`` is true; otherwise an ``ApiResponse``
            wrapping the generated text.
        """
        start_time = time.time()

        # Streaming requests get a callback handler that collects tokens
        # as the chain produces them; synchronous requests need no callbacks.
        chain_stream_handler = None
        callback_manager = None
        if request.stream:
            chain_stream_handler = ChainStreamHandler(method_name="文本生成", requestId=request.requestId)
            callback_manager = CallbackManager([chain_stream_handler])

        # Build the language model and the completion chain.
        llm = self.get_llm(
            model_name=request.model_name,
            temperature=request.temperature,
            stream=request.stream,
            callback_manager=callback_manager,
            max_tokens=request.max_tokens
        )
        chain = LLMChain(llm=llm, prompt=CHAT_COMPLETION_PROMPT.PROMPT)

        # Convert the request's chat history into LangChain message objects.
        # Roles other than "human"/"assistant" are dropped, matching the
        # original if/elif behavior.
        role_to_message = {"human": HumanMessage, "assistant": AIMessage}
        chat_history = [
            role_to_message[msg.role](content=msg.content)
            for msg in request.chat_history
            if msg.role in role_to_message
        ]

        chain_parameter = {
            "question": request.prompt,
            # NOTE(review): "output_key" is passed here as a chain *input*,
            # not as LLMChain configuration (output_key='answer' on the
            # chain constructor) — confirm this is intentional.
            "output_key": 'answer',
            "chat_history": chat_history
        }

        if request.stream:
            # Fix: the original created a fresh asyncio event loop per
            # request (never run, never closed — a leak) and abused
            # run_in_executor as a fire-and-forget launcher. A daemon
            # thread achieves the same: the chain runs in the background
            # while tokens flow to the caller via the stream handler.
            threading.Thread(
                target=chain.invoke, args=(chain_parameter,), daemon=True
            ).start()
            return chain_stream_handler.generate_tokens()
        else:
            with get_openai_callback() as cb:
                answer = chain.invoke(chain_parameter)
                elapsed = time.time() - start_time
                # loguru's lazy brace formatting; messages kept verbatim.
                logger.info("文本生成同步运行时间({})：{:.2f}秒", request.requestId, elapsed)
                logger.info("文本生成同步应答内容({})：{}", request.requestId, answer["text"])
                logger.info("文本生成同步token消耗({})：{}", request.requestId, cb)
                return ApiResponse.success(answer["text"])


