from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import run_in_executor

from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
import torch


class BaichuanChatModel(BaseChatModel):
    """LangChain chat model backed by a locally hosted Baichuan2 checkpoint.

    The low-bit (quantized) weights are loaded through BigDL-LLM at
    class-definition time, so importing this module is slow and requires
    both checkpoint directories below to exist on disk.
    """

    # Full-precision checkpoint directory; used only for the tokenizer.
    model_path = "Chat/Baichuan2-13B-Chat"
    # BigDL-LLM low-bit (4-bit) converted weights directory.
    model_low_bit_path = "Chat/Baichuan2-7B-Chat-4bit"
    # NOTE: deliberately left un-annotated so pydantic's BaseChatModel
    # treats these as plain class attributes rather than model fields.
    model = AutoModelForCausalLM.load_low_bit(model_low_bit_path,
                                              trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)

    @staticmethod
    def _to_history(messages: List[BaseMessage]) -> List[Dict[str, Any]]:
        """Convert LangChain messages into Baichuan ``model.chat`` history.

        Each entry is ``{"role": ..., "content": ...}``.  The role is
        derived from the message's actual type (``system``/``human``/``ai``)
        rather than from its position in the list, so a conversation that
        starts with a human message is sent as ``user`` instead of being
        mislabeled ``system`` (which the previous position-based logic did).
        """
        role_map = {"system": "system", "human": "user", "ai": "assistant"}
        # Unknown message types (e.g. a generic ChatMessage) fall back to
        # "user" — TODO(review): confirm this matches caller expectations.
        return [
            {"role": role_map.get(message.type, "user"),
             "content": message.content}
            for message in messages
        ]

    def _generate(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> ChatResult:
        """Generate a single chat completion from the local model.

        Args:
            messages: the prompt composed of a list of messages.
            stop: accepted for interface compatibility but currently NOT
                  forwarded — Baichuan's ``chat`` helper is called without
                  stop sequences here, as in the original implementation.
            run_manager: A run manager with callbacks for the LLM.

        Returns:
            A ``ChatResult`` with one ``ChatGeneration`` wrapping the
            model's reply as an ``AIMessage``.
        """
        history = self._to_history(messages)
        # Run the forward pass under inference_mode as well; previously
        # only the history-building loop was inside the context while the
        # actual model.chat call was not (unlike _stream).
        with torch.inference_mode():
            reply = self.model.chat(self.tokenizer, history)
        generation = ChatGeneration(message=AIMessage(content=reply))
        return ChatResult(generations=[generation])

    def _stream(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the model's reply chunk by chunk.

        Argument semantics match ``_generate`` (``stop`` is likewise not
        forwarded).  Each partial response from
        ``model.chat(..., stream=True)`` is wrapped in a
        ``ChatGenerationChunk``, reported to ``run_manager`` and yielded.

        NOTE(review): Baichuan's streaming ``chat`` appears to yield the
        accumulated response so far rather than token deltas — confirm
        before concatenating chunks downstream.
        """
        history = self._to_history(messages)
        with torch.inference_mode():
            for partial in self.model.chat(self.tokenizer, history,
                                           stream=True):
                chunk = ChatGenerationChunk(
                    message=AIMessageChunk(content=partial)
                )
                if run_manager:
                    run_manager.on_llm_new_token(partial, chunk=chunk)
                yield chunk

    async def _astream(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Async variant of ``_stream``.

        Delegates to the synchronous ``_stream`` and kicks it off in a
        separate thread via ``run_in_executor``; the async run manager is
        downgraded to its sync counterpart for the worker thread.
        """
        result = await run_in_executor(
            None,
            self._stream,
            messages,
            stop=stop,
            run_manager=run_manager.get_sync() if run_manager else None,
            **kwargs,
        )
        for chunk in result:
            yield chunk

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model."""
        return "baichuan-chat-model"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters.

        NOTE(review): this includes the live ``model`` and ``tokenizer``
        objects, not just serializable identifiers — preserved for
        backward compatibility, but verify nothing tries to serialize it.
        """
        return {"model_path": self.model_path,
                "model_low_bit_path": self.model_low_bit_path,
                "model": self.model,
                "tokenizer": self.tokenizer}
