from llmtcg.chat.model import ChatModel, ChatContext
from llmtcg.chat.models.openai import OpenaiCompatibleChatModel

from dataclasses import dataclass, field
from os import path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer  # type: ignore


@dataclass
class LocalOpenaiChatModel(OpenaiCompatibleChatModel):
    """OpenAI-compatible chat model served from a local endpoint.

    Thin concrete subclass that supplies the API address and model name
    required by ``OpenaiCompatibleChatModel``.
    """

    _api_address: str  # base URL of the local OpenAI-compatible server
    _model_name: str   # model identifier sent with each request

    @property
    def api_address(self) -> str:
        """Base URL of the OpenAI-compatible API server."""
        return self._api_address

    @property
    def model_name(self) -> str:
        """Name of the model requested from the server."""
        return self._model_name

    def __hash__(self) -> int:
        # Hash the same fields the dataclass-generated __eq__ compares,
        # so equal instances hash equally.  The previous id()-based hash
        # violated the hash/eq invariant: two distinct-but-equal
        # instances could land in different hash buckets, breaking
        # dict/set membership.
        return hash((self._api_address, self._model_name))


@dataclass
class HFTransformerChatModel(ChatModel):
    """Chat model backed by a local HuggingFace Transformers checkpoint.

    Loads a causal language model and its tokenizer from
    ``model_directory`` at construction time and answers chats through a
    cached text-generation pipeline.
    """

    model_directory: str  # path to a local HF checkpoint directory

    model: 'PreTrainedModel' = field(init=False)
    tokenizer: 'PreTrainedTokenizer' = field(init=False)

    def __post_init__(self) -> None:
        # Imported lazily so merely importing this module does not
        # require transformers/torch to be installed.
        from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
        import torch  # type: ignore
        # Fixed seed for reproducible generation.
        torch.random.manual_seed(0)

        # Raise explicitly instead of `assert`, which is stripped when
        # Python runs with -O, silently skipping the validation.
        if not path.isdir(self.model_directory):
            raise FileNotFoundError(
                f"model directory not found: {self.model_directory!r}"
            )

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_directory,
            device_map="cuda",
            torch_dtype="auto",
            trust_remote_code=True,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_directory)
        # Build the generation pipeline once here; the previous
        # implementation rebuilt it on every chat() call, paying the
        # construction cost per request for no benefit.
        self._pipe = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
        )

    def pretty_name(self) -> str:
        """Human-readable identifier for this model (its directory path)."""
        return self.model_directory

    def chat(self, context: ChatContext) -> str:
        """Run one chat turn.

        Builds the message list from the context's system prompt and
        history, then returns the newly generated text only
        (``return_full_text=False``).
        """
        messages = [{"role": "system", "content": context.system_prompt}]
        for role, msg in context.history:
            messages.append({"role": role.value, "content": msg})

        # Greedy decoding (do_sample=False) for deterministic output.
        generation_args = {
            "max_new_tokens": 4096,
            "return_full_text": False,
            "do_sample": False,
        }

        output = self._pipe(messages, **generation_args)
        return output[0]['generated_text']
