
import os
import time
from threading import Thread
from typing import Any, Callable, Dict, List, Optional

import torch
from airllm import AutoModel as AirAutoModel
from modelscope import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
from transformers import PreTrainedTokenizer, TextIteratorStreamer
from typeassert import typeassert

class TextIteratorStreamerWrapper(TextIteratorStreamer):
    """A ``TextIteratorStreamer`` that fires a callback when the stream ends.

    The optional ``finish_callback`` is invoked exactly once, when the base
    streamer's stop signal is consumed, right before ``StopIteration`` is
    raised to the iterating caller.
    """

    @typeassert(
        skip_prompt=bool,
        timeout=Optional[float],
        finish_callback=Optional[Callable],
    )
    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        skip_prompt: bool = False,
        timeout: Optional[float] = None,
        finish_callback: Optional[Callable] = None,
        **decode_kwargs
    ):
        # Queue/decoding machinery lives in the base class; the completion
        # hook is the only extra state this subclass carries.
        super().__init__(tokenizer, skip_prompt, timeout, **decode_kwargs)
        self.finish_callback = finish_callback

    def __next__(self):
        item = self.text_queue.get(timeout=self.timeout)
        if item != self.stop_signal:
            return item
        # End of stream: notify the owner first, then terminate iteration.
        if self.finish_callback is not None:
            self.finish_callback()
        raise StopIteration()


class GenerateTextConfig():
    """Optional generation settings for ``LanguageModelPredictor.generate_text``.

    Any field left as ``None`` is omitted from the generation kwargs, so the
    model's own ``generation_config`` default applies.

    Attributes:
        streaming: if True, ``generate_text`` returns a text-chunk iterator
            instead of the full decoded string.
        max_length: hard cap on total sequence length (prompt + generation).
        temperature: sampling temperature.
        top_k: top-k sampling cutoff.
        top_p: nucleus-sampling probability mass.
    """

    @typeassert(
        streaming=bool,
        max_length=Optional[int],
        temperature=Optional[float],
        top_k=Optional[int],
        top_p=Optional[float]
    )
    def __init__(
        self,
        streaming: bool = False,
        max_length: Optional[int] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None
    ):
        self.streaming = streaming
        self.max_length = max_length
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p

    def __repr__(self):
        # Added for debuggability: the original class had no __repr__, making
        # logged/inspected configs opaque.
        return (
            f"{type(self).__name__}(streaming={self.streaming!r}, "
            f"max_length={self.max_length!r}, temperature={self.temperature!r}, "
            f"top_k={self.top_k!r}, top_p={self.top_p!r})"
        )


class LanguageModelPredictor():
    """Loads a causal LM + tokenizer and generates text, blocking or streaming.

    Supports regular HF/modelscope loading (``device_map="auto"``) or airllm
    layered loading for large models on small GPUs.
    """

    __model: Any
    __tokenizer: PreTrainedTokenizer
    __loaded: bool      # True once load() has fully succeeded
    __generating: bool  # guards release() while a generation is in flight

    def __init__(self, model_path, dtype=torch.bfloat16, use_airllm=False):
        self.__model = None
        self.__tokenizer = None
        self.__loaded = False
        self.__generating = False
        self.load(model_path, dtype, use_airllm)

    def __del__(self):
        # Force-clear the flag so release() cannot raise during teardown.
        self.__generating = False
        self.release()

    @property
    def tokenizer(self):
        return self.__tokenizer

    @property
    def eos_token(self):
        return self.__tokenizer.eos_token

    @property
    def eos_token_id(self):
        return self.__tokenizer.eos_token_id

    @staticmethod
    def __log(message):
        # Timestamped console log; reproduces the original print format
        # (timestamp, then the message via `end=`).
        print(time.strftime(" %Y-%m-%d %H:%M:%S ", time.localtime()), end=f"{message}\n")

    def load(self, model_path, dtype, use_airllm=False):
        """Load tokenizer and model from ``model_path``.

        Releases any previously loaded model first. ``dtype`` is forced to
        fp16 for AWQ checkpoints (they are calibrated for fp16).
        """
        if self.__loaded:
            self.release()
        model_name = os.path.basename(model_path)
        self.__log(f"初始化 tokenizer for {model_name}")
        self.__tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.__log(f"tokenizer for {model_name} 初始化完毕")

        self.__log(f"初始化 model for {model_name}")
        if "awq" in model_name.lower():
            dtype = torch.float16
        if use_airllm:
            self.__model = AirAutoModel.from_pretrained(
                model_path,
                dtype=dtype,
                device="cuda" if torch.cuda.is_available() else "cpu",
            )
        else:
            self.__model = AutoModelForCausalLM.from_pretrained(
                model_path,
                trust_remote_code=True,
                torch_dtype=dtype,
                device_map="auto",
            )
        # BUG FIX: no post-hoc .to("cuda") here. With device_map="auto" the
        # model is dispatched by accelerate and must not be moved afterwards
        # (it raises); airllm likewise placed itself via `device=` above.
        self.__model.generation_config = GenerationConfig.from_pretrained(model_path)
        if "deepseek" in model_name.lower():
            # DeepSeek configs ship without pad_token_id; reuse EOS to avoid
            # generation warnings/errors.
            self.__model.generation_config.pad_token_id = self.__model.generation_config.eos_token_id
        # BUG FIX: __loaded was never set, so a second load() leaked the
        # previous model instead of releasing it.
        self.__loaded = True
        self.__log(f"model for {model_name} 初始化完毕")

    def release(self):
        """Drop model/tokenizer references and free the CUDA cache.

        Raises:
            Exception: if a generation is currently in progress.
        """
        if self.__generating:
            raise Exception("Error: 正在生成文本，请等待完成。")

        # BUG FIX: rebind to None instead of bare `del attr` — after a bare
        # del, any later attribute access (including the is-None guards in
        # generate_text) raised AttributeError instead of the intended error.
        self.__tokenizer = None
        self.__model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        self.__loaded = False

    @torch.inference_mode()
    @typeassert(max_new_tokens=int, config=Optional[GenerateTextConfig])
    def generate_text(
        self,
        message: List[Dict[str, str]] | List[List[Dict[str, str]]],
        max_new_tokens=512,
        config: Optional[GenerateTextConfig] = None
    ):
        """Generate a reply for a chat ``message`` list.

        Args:
            message: chat messages (or a batch of them) in the tokenizer's
                chat-template format.
            max_new_tokens: generation length cap.
            config: optional sampling/streaming settings; defaults to a fresh
                ``GenerateTextConfig()``.

        Returns:
            A decoded string, or a ``TextIteratorStreamerWrapper`` when
            ``config.streaming`` is True (iterate it to consume the text;
            exhausting it clears the internal generating flag).

        Raises:
            Exception: if no model is loaded.
        """
        if self.__tokenizer is None or self.__model is None:
            raise Exception("Error: 模型尚未加载，请先加载模型。")
        if config is None:
            # BUG FIX: was a shared mutable default argument
            # (config=GenerateTextConfig()); create per call instead.
            config = GenerateTextConfig()

        self.__generating = True
        try:
            text = self.__tokenizer.apply_chat_template(
                message,
                tokenize=False,
                add_generation_prompt=True,
            )
            inputs = self.__tokenizer(text, return_tensors="pt").to(self.__model.device)

            args = dict(
                eos_token_id=self.__tokenizer.eos_token_id,
                max_new_tokens=max_new_tokens,
                use_cache=True,
            )
            # Only forward settings the caller actually specified, so the
            # model's generation_config defaults win otherwise.
            if config.max_length is not None:
                args["max_length"] = config.max_length
            if config.temperature is not None:
                args["temperature"] = config.temperature
            if config.top_k is not None:
                args["top_k"] = config.top_k
            if config.top_p is not None:
                args["top_p"] = config.top_p

            if config.streaming:
                def set_generating_false():
                    self.__generating = False

                streamer = TextIteratorStreamerWrapper(
                    self.__tokenizer,
                    skip_prompt=True,
                    finish_callback=set_generating_false,
                )
                args["input_ids"] = inputs.input_ids
                args["attention_mask"] = inputs.attention_mask
                args["streamer"] = streamer
                # Generation runs in the background; the consumer drains the
                # streamer, whose finish callback clears __generating.
                Thread(target=self.__model.generate, kwargs=args).start()
                return streamer

            output_ids = self.__model.generate(**inputs, **args)
            # Strip the prompt tokens; decode only the newly generated tail.
            generated_ids = output_ids[0][len(inputs.input_ids[0]):]
            result = self.__tokenizer.decode(
                generated_ids,
                skip_special_tokens=True
            )
            self.__generating = False
            return result
        except BaseException:
            # BUG FIX: any error used to leave __generating stuck at True,
            # permanently blocking release().
            self.__generating = False
            raise
