import json
import queue
import time
from abc import ABC
from collections.abc import Generator
from typing import Any, Optional, Union, Dict, List
from uuid import UUID

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
from loguru import logger
from openai import OpenAIError, AuthenticationError, RateLimitError
from volcenginesdkarkruntime._exceptions import ArkError
from ai_engine.core.model import result
from ai_engine.core.model.result_code import ERROR_AI_DEFAULT_ERROR, ERROR_AI_ACCOUNT_ERROR, ERROR_AI_RATE_LIMIT_ERROR, \
    ERROR_DEFAULT_ERROR


def token_usage(usage_metadata) -> str:
    """Format a token-usage mapping into a short log string.

    Args:
        usage_metadata: mapping expected to carry ``total_tokens``,
            ``input_tokens`` and ``output_tokens`` keys (LangChain
            ``usage_metadata`` shape — TODO confirm against provider);
            may be None or empty.

    Returns:
        A one-line human-readable summary, or ``"None"`` when no
        metadata is available.
    """
    if not usage_metadata:
        return "None"
    # .get() keeps logging robust when a provider omits one of the keys;
    # a KeyError here would otherwise crash on_llm_end before it queues
    # the end-of-stream sentinel.
    return (
        f"Tokens Used: {usage_metadata.get('total_tokens')},"
        f" Prompt Tokens: {usage_metadata.get('input_tokens')},"
        f" Completion Tokens: {usage_metadata.get('output_tokens')},"
    )


class QAChainStreamHandler(BaseCallbackHandler, ABC):
    """Bridges LangChain LLM callbacks to a server-sent-events stream.

    Tokens received from the model are pushed into a thread-safe queue;
    ``generate_tokens`` drains that queue and yields SSE ``data:`` frames.
    The class object ``StopIteration`` is used as the end-of-stream
    sentinel, and exception instances pushed into the queue are rendered
    as error frames followed by the terminal frame.
    """

    # Terminal SSE payload emitted once the stream has finished.
    end = json.dumps(result.success_dict("[DONE]"), ensure_ascii=False)
    # Kept as a class-level default for backward compatibility; every
    # instance shadows it in __init__ so timing state is never shared.
    start_time = None

    def __init__(self, method_name: str = "", request_id: str = ""):
        self.start_time: Optional[float] = None  # set from metadata in on_llm_start
        self.receive_time: Optional[float] = None  # set when streaming starts
        self.tokens: queue.Queue = queue.Queue()
        self.method_name = method_name
        self.request_id = request_id
        # Bug fix: the original called super(BaseCallbackHandler, self),
        # which skips BaseCallbackHandler itself in the MRO; plain
        # super() runs the full cooperative initializer chain.
        super().__init__()

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Queue a streamed token, stripping markdown JSON fences."""
        token = token.replace("```json\n", "").replace("\n```", "")
        self.tokens.put(token)

    def on_llm_start(
            self,
            serialized: Dict[str, Any],
            prompts: List[str],
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            tags: Optional[List[str]] = None,
            metadata: Optional[Dict[str, Any]] = None,
            **kwargs: Any,
    ) -> Any:
        """Run when LLM starts running; records timing information."""
        self.receive_time = time.time()
        # Bug fix: metadata is Optional — guard against None before the
        # membership test, which previously raised TypeError.
        if metadata and "start_time" in metadata:
            self.start_time = metadata["start_time"]
        logger.info(self.method_name + "sse开始接收数据(" + self.request_id + ")")

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Log timing/usage for the finished response, then close the stream."""
        end_time = time.time()
        res_content = "响应内容异常"
        usage_metadata = None
        try:
            if response.generations and response.generations[0]:
                generation = response.generations[0][0]
                res_content = generation.text.replace(" ", "")
                try:
                    # usage_metadata is only present on chat-style
                    # generations; fall back to None otherwise.
                    usage_metadata = generation.message.usage_metadata
                except Exception as e:
                    # Bug fix: loguru silently drops extra args when the
                    # message has no {} placeholder — the detail was lost.
                    logger.error("获取token消耗异常: {}", e)

            exec_time = end_time - self.start_time if self.start_time else 0
            # Bug fix: receive_time may be unset if on_llm_start never
            # fired; the subtraction previously raised TypeError.
            receive_cost = end_time - self.receive_time if self.receive_time else 0
            logger.info(self.method_name + "sse运行时间(" + self.request_id + ")：%.2f秒" % exec_time
                        + "【接收数据耗时：%.2f秒】" % receive_cost)
            logger.info(self.method_name + "sse应答内容(" + self.request_id + "):" + res_content)
            logger.info(self.method_name + "token消耗(" + self.request_id + "):" + token_usage(usage_metadata))
        finally:
            # Always unblock the consumer — without this, any exception in
            # the logging above would leave generate_tokens() waiting on
            # the queue forever.
            self.tokens.put(StopIteration)

    def on_llm_error(
            self,
            error: Union[Exception, KeyboardInterrupt],
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> None:
        """Forward an LLM error to the consumer via the token queue."""
        self.tokens.put(error)

    def on_chain_error(
            self,
            error: Union[Exception, KeyboardInterrupt],
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> None:
        """Forward a chain error to the consumer via the token queue."""
        self.tokens.put(error)

    def on_tool_error(
            self,
            error: Union[Exception, KeyboardInterrupt],
            *,
            run_id: UUID,
            parent_run_id: Optional[UUID] = None,
            **kwargs: Any,
    ) -> None:
        """Forward a tool error to the consumer via the token queue."""
        self.tokens.put(error)

    def generate_tokens(self) -> Generator:
        """Drain the token queue, yielding SSE frames until the sentinel.

        Each item becomes a ``data: <json>`` frame (double-newline
        terminated). Known provider errors produce an error frame plus
        the terminal frame; anything non-serializable falls through to
        the generic error path.
        """
        while True:
            tk = self.tokens.get()
            try:
                if tk is StopIteration:
                    yield f"data: {self.end}\n\n"
                    return
                elif isinstance(tk, (ArkError, OpenAIError)):
                    error_class_name = type(tk).__name__

                    # isinstance (instead of exact type comparison) also
                    # maps provider-specific subclasses correctly.
                    if isinstance(tk, AuthenticationError):
                        error_base = ERROR_AI_ACCOUNT_ERROR
                    elif isinstance(tk, RateLimitError):
                        error_base = ERROR_AI_RATE_LIMIT_ERROR
                    else:
                        error_base = ERROR_AI_DEFAULT_ERROR

                    error = json.dumps(
                        result.error_dict(error_base=error_base, message_append=error_class_name + "," + str(tk)),
                        ensure_ascii=False)
                    yield f"data: {error}\n\n"
                    yield f"data: {self.end}\n\n"

                    logger.error("sse异常(" + self.request_id + "):" + error_class_name + ",tk:" + str(tk))
                    return
                else:
                    data = json.dumps(result.success_dict(tk), ensure_ascii=False)
                    yield f"data: {data}\n\n"
            except Exception as ex:
                # Fallback: non-serializable queue items (e.g. exception
                # types other than Ark/OpenAI errors pushed by the error
                # callbacks) end up here and terminate the stream.
                logger.error("sse异常(" + self.request_id + "):" + str(ex) + ",tk:" + str(tk))
                error = json.dumps(result.error_dict(error_base=ERROR_DEFAULT_ERROR, message_append=str(tk)),
                                   ensure_ascii=False)
                yield f"data: {error}\n\n"
                yield f"data: {self.end}\n\n"
                return
