import os
from abc import ABC
from typing import Dict, Any, Optional, List, Iterator, Tuple

from langchain_core.callbacks import CallbackManagerForLLMRun, Callbacks
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import BaseMessage, HumanMessage, FunctionMessage, AIMessage, SystemMessage, \
    AIMessageChunk, BaseMessageChunk, HumanMessageChunk, SystemMessageChunk, ChatMessage, message_chunk_to_message, \
    ChatMessageChunk
from langchain_core.outputs import ChatResult, ChatGeneration, ChatGenerationChunk, LLMResult
from langchain_core.prompt_values import PromptValue
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import get_colored_text
from pydantic import root_validator
from volcenginesdkarkruntime.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
from volcenginesdkarkruntime.types.chat.chat_completion_chunk import ChoiceDelta

from ai_engine.core.llm.volcengine.ark_common import ArkFactory


def _convert_delta_to_message_chunk(
        delta: ChoiceDelta
) -> BaseMessageChunk:
    """Map a streaming ``ChoiceDelta`` onto the matching LangChain message chunk.

    Streamed deltas may carry ``content=None`` (e.g. a final chunk that only
    holds a finish reason) and ``role=None`` (only the first chunk of a
    completion includes the role), so both are defaulted here instead of being
    passed straight into the pydantic message-chunk constructors, which
    require a string content.
    """
    role = delta.role
    # Guard against None content: message chunks require a string.
    content = delta.content or ""

    if role == "user":
        return HumanMessageChunk(content=content)
    elif role == "assistant" or role is None:
        # Chat-completion deltas are assistant output; continuation chunks
        # omit the role, so a missing role is treated as "assistant".
        return AIMessageChunk(content=content)
    elif role == "system":
        return SystemMessageChunk(content=content)
    else:
        return ChatMessageChunk(role=role, content=content)


def _convert_message_to_dict(message: BaseMessage) -> dict:
    """Convert a LangChain message into the role/content dict the Ark API expects.

    Raises:
        ValueError: for message types the Ark chat API has no role for
            (e.g. a bare ``ChatMessage``).
    """
    if isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, FunctionMessage):
        # OpenAI-compatible chat APIs require the function's name alongside
        # its output for function-role messages; the original dropped it.
        message_dict = {
            "role": "function",
            "content": message.content,
            "name": message.name,
        }
    else:
        raise ValueError(f"Got unknown type {message}")
    return message_dict


def _convert_result_to_message(message: ChatCompletionMessage) -> BaseMessage:
    """Convert an Ark ``ChatCompletionMessage`` into the matching LangChain message."""
    role = message.role
    content = message.content
    # Simple roles map 1:1 onto a LangChain message class.
    simple_roles = {
        "user": HumanMessage,
        "assistant": AIMessage,
        "system": SystemMessage,
    }
    if role in simple_roles:
        return simple_roles[role](content=content)
    if role == "function":
        # NOTE(review): assumes function_call is populated on function-role
        # messages; if it can be None this raises AttributeError — confirm
        # against the Ark SDK's response schema.
        return FunctionMessage(content=content, name=message.function_call.name)
    return ChatMessage(content=content, role=role)


def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
    """Collapse a stream of generation chunks into a single ``ChatResult``.

    Chunks are concatenated in arrival order via ``ChatGenerationChunk.__add__``.

    Raises:
        ValueError: if the stream yields no chunks. (The original used a bare
            ``assert``, which is stripped under ``python -O`` and would let a
            ``None`` generation flow into ``message_chunk_to_message``.)
    """
    generation: Optional[ChatGenerationChunk] = None
    for chunk in stream:
        generation = chunk if generation is None else generation + chunk
    if generation is None:
        raise ValueError("Received empty stream: no chunks were generated")
    return ChatResult(
        generations=[
            ChatGeneration(
                message=message_chunk_to_message(generation.message),
                generation_info=generation.generation_info,
            )
        ]
    )


class VolcengineArkChatModel(BaseChatModel, ABC):
    """LangChain chat model backed by Volcengine Ark (volcengine 火山方舟).

    Supports both blocking and streaming chat completions against an Ark
    model endpoint. Credentials fall back to the ``VOLC_ACCESSKEY`` /
    ``VOLC_SECRETKEY`` environment variables when not passed explicitly.
    """
    # Ark runtime client, built by ``validate_environment``.
    client: Any
    # Access key / secret key; env vars are used when these are None.
    # NOTE: the original had trailing commas here, which turned the defaults
    # into 1-tuples ``(None,)`` and silently broke the env-var fallback.
    volc_ak: Optional[str] = None
    volc_sk: Optional[str] = None
    # Model accessed by its endpoint_id on Volcengine Ark.
    model: str = ""
    # Whether to use the streaming API.
    streaming: Optional[bool] = False
    # Request timeout for chat HTTP requests, in seconds.
    request_timeout: Optional[int] = Field(30, alias="timeout")
    # Sampling controls.
    temperature: Optional[float] = 1
    top_p: Optional[float] = 0.7
    max_tokens: Optional[int] = None
    # Default stop sequences (a trailing comma previously made this ``(None,)``,
    # so ``is not None`` checks were always true).
    stop: Optional[List[str]] = None
    # Extra keyword args forwarded verbatim to the Ark API.
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    # Streaming options; when unset, usage reporting is requested by default.
    stream_options: Optional[Dict[str, Any]] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate configuration and build the shared Ark client.

        Falls back to ``VOLC_ACCESSKEY`` / ``VOLC_SECRETKEY`` environment
        variables when explicit credentials are not supplied.

        Raises:
            ValueError: if ``model`` is unset or empty.
            ImportError: if the volcengine SDK is not installed.
        """
        try:
            if not values.get("model"):
                raise ValueError("must set model")
            ak = values.get("volc_ak") or os.environ.get("VOLC_ACCESSKEY")
            sk = values.get("volc_sk") or os.environ.get("VOLC_SECRETKEY")
            values["client"] = ArkFactory.get_instance(ak=ak, sk=sk)
        except ImportError:
            raise ImportError(
                "volcenginesdk package not found, please install it with "
                "`pip install  volcengine-python-sdk `"
            )
        return values

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "volcengine-volcengine-llm"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Volcengine Ark API."""
        params = {
            "model": self.model,
            "stream": self.streaming,
            "temperature": self.temperature,
            "top_p": self.top_p,
            **self.model_kwargs,
        }
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
        if self.stop is not None:
            params["stop"] = self.stop
        # Honor a user-supplied stream_options (the original silently ignored
        # it); default to asking the API to report token usage.
        params["stream_options"] = (
            self.stream_options
            if self.stream_options is not None
            else {"include_usage": True}
        )
        return params

    def _generate(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> ChatResult:
        """Call out to a Volcengine model endpoint for each generation with a prompt.

        Args:
            messages: The messages to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Callback manager; system prompts are echoed to it.
        Returns:
            The string generated by the model.
        """
        # Echo formatted system prompts to the callback manager for debugging.
        for msg in messages:
            if msg.type == "system":
                _colored_text = get_colored_text(msg.content, "green")
                _text = "Prompt after formatting:\n" + _colored_text
                if run_manager:
                    run_manager.on_text(_text, end="\n", verbose=True)

        if self.streaming:
            # Streaming request: consume the chunk iterator into one result.
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        else:
            # Non-streaming request.
            message_dicts, params = self._create_message_dicts(messages, stop)
            params = {**params, **kwargs}
            # volcenginesdkarkruntime.resources.chat.completions.Completions.create
            chat_completion = self.client.chat.completions.create(messages=message_dicts, **params)
            return self._create_chat_result(chat_completion)

    def _create_message_dicts(
            self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        """Build the message payload and request params for an API call."""
        params = self._default_params
        if stop is not None:
            # An explicit per-call stop list overrides the configured default.
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(
            self, response: ChatCompletion
    ) -> ChatResult:
        """Convert a non-streaming ``ChatCompletion`` response into a ``ChatResult``."""
        generations = []
        for choice in response.choices:
            message = _convert_result_to_message(choice.message)
            generation_info = dict(finish_reason=choice.finish_reason)
            generation_info["logprobs"] = choice.logprobs
            gen = ChatGeneration(
                message=message,
                generation_info=generation_info,
            )
            generations.append(gen)
        llm_output = {
            "token_usage": response.usage,
            "model_name": self.model,
        }
        return ChatResult(generations=generations, llm_output=llm_output)

    def _stream(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream ``ChatGenerationChunk``s from the Ark chat completions endpoint."""
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}
        stream = self.client.chat.completions.create(messages=message_dicts, **params)
        for raw_chunk in stream:
            if not isinstance(raw_chunk, ChatCompletionChunk):
                raise ValueError("chunk is not ChatCompletionChunk")
            logprobs = None
            if len(raw_chunk.choices) == 0:
                # A choice-less chunk is the trailing usage report produced by
                # stream_options={"include_usage": True}.
                print("token 使用情况：" + str(raw_chunk.usage))
                if not raw_chunk.usage:
                    continue
                usage = raw_chunk.usage
                # ``usage`` is a CompletionUsage model (attribute access is
                # used on it in _create_chat_result too); the original indexed
                # it like a dict, which raises TypeError at runtime.
                usage_metadata = {
                    "input_tokens": usage.prompt_tokens,
                    "output_tokens": usage.completion_tokens,
                    "total_tokens": usage.total_tokens,
                }
                gen_chunk = ChatGenerationChunk(
                    message=AIMessageChunk(  # type: ignore[call-arg]
                        content="", usage_metadata=usage_metadata
                    )
                )
            else:
                choice = raw_chunk.choices[0]
                if choice.delta is None:
                    continue
                message_chunk = _convert_delta_to_message_chunk(choice.delta)
                generation_info = {}
                if finish_reason := choice.finish_reason:
                    generation_info["finish_reason"] = finish_reason
                logprobs = choice.logprobs
                if logprobs:
                    generation_info["logprobs"] = logprobs
                gen_chunk = ChatGenerationChunk(
                    message=message_chunk, generation_info=generation_info or None
                )
            if run_manager:
                run_manager.on_llm_new_token(
                    gen_chunk.text, chunk=gen_chunk, logprobs=logprobs
                )
            yield gen_chunk

    def generate_prompt(
            self,
            prompts: List[PromptValue],
            stop: Optional[List[str]] = None,
            callbacks: Callbacks = None,
            **kwargs: Any,
    ) -> LLMResult:
        """Render each prompt to messages and delegate to ``generate``."""
        prompt_messages = [p.to_messages() for p in prompts]
        return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
