import os
import time

from .llm_serve import LLMBase
from .llm_serve.vicuna import load_model, get_generate_stream_function, get_context_length, new_chat, SimpleChatIO, \
    reload_conv


class VicunaConfig:
    """Default configuration values consumed by the ``Vicuna`` wrapper below."""

    # Filesystem path to the pretrained Vicuna checkpoint.
    model_path = '/usr/pigcha/vicuna-7b-v1.5'

    # Target device and GPU selection; `gpus` is a comma-separated device list.
    device = 'cuda'
    gpus = '0,1,2'
    num_gpus = 3

    # NOTE: these assignments execute at class-definition (import) time, as a
    # deliberate side effect — presumably so the visibility masks are in place
    # before any CUDA/XPU context is created. TODO(review): confirm this
    # ordering assumption against how the module is imported.
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus
    os.environ["XPU_VISIBLE_DEVICES"] = gpus

    # Whether SimpleChatIO accepts multiline input.
    multiline = False

    # Options forwarded verbatim to `load_model` (see Vicuna.__init__).
    max_gpu_memory = None
    dtype = None
    load_8bit = False
    cpu_offloading = False
    revision = 'main'
    debug = False
    # When False, every chat() call starts a fresh conversation.
    history = False

    # Generation parameters forwarded into `gen_params` / the stream function.
    repetition_penalty = 1.0
    conv_template = None
    conv_system_msg = None
    temperature = 0.7
    max_new_tokens = 512
    judge_sent_end = False

    # Stored on the Vicuna instance but not consumed anywhere in this file.
    use_fast_tokenizer = False


class Vicuna(LLMBase):
    """Singleton chat wrapper around a locally loaded Vicuna model.

    ``__new__`` guarantees a single shared instance, and ``__init__`` is
    guarded so the expensive model load happens exactly once no matter how
    many times ``Vicuna()`` is called.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: create the instance on first call, reuse after.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, model_path=VicunaConfig.model_path,
                 device=VicunaConfig.device,
                 gpus=VicunaConfig.gpus,
                 num_gpus=VicunaConfig.num_gpus,
                 multiline=VicunaConfig.multiline,
                 max_gpu_memory=VicunaConfig.max_gpu_memory,
                 dtype=VicunaConfig.dtype,
                 load_8bit=VicunaConfig.load_8bit,
                 cpu_offloading=VicunaConfig.cpu_offloading,
                 revision=VicunaConfig.revision,
                 debug=VicunaConfig.debug,
                 history=VicunaConfig.history,
                 repetition_penalty=VicunaConfig.repetition_penalty,
                 conv_template=VicunaConfig.conv_template,
                 conv_system_msg=VicunaConfig.conv_system_msg,
                 temperature=VicunaConfig.temperature,
                 max_new_tokens=VicunaConfig.max_new_tokens,
                 judge_sent_end=VicunaConfig.judge_sent_end,
                 use_fast_tokenizer=VicunaConfig.use_fast_tokenizer):
        """Load the model and tokenizer (first call only).

        Every parameter defaults to the matching ``VicunaConfig`` attribute;
        see that class for their meaning. Arguments passed on a second or
        later ``Vicuna()`` call are silently ignored by the guard below.
        """
        # BUG FIX: Python invokes __init__ on the object returned by __new__
        # on *every* ``Vicuna()`` call, so without this guard the model load
        # below (and its GPU memory allocation) would be repeated for each
        # instantiation of the singleton.
        if getattr(self, '_initialized', False):
            return

        self.model_path = model_path

        self.device = device
        self.gpus = gpus
        self.num_gpus = num_gpus

        # Also set at import time by VicunaConfig; re-applied here in case a
        # caller overrides `gpus`. NOTE(review): to take effect these must be
        # set before the CUDA context is created — confirm load_model honors
        # them when set this late.
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpus
        os.environ["XPU_VISIBLE_DEVICES"] = self.gpus

        self.multiline = multiline

        self.max_gpu_memory = max_gpu_memory
        self.dtype = dtype
        self.load_8bit = load_8bit
        self.cpu_offloading = cpu_offloading
        self.revision = revision
        self.debug = debug
        self.history = history

        self.repetition_penalty = repetition_penalty
        self.conv_template = conv_template
        self.conv_system_msg = conv_system_msg
        self.temperature = temperature
        self.max_new_tokens = max_new_tokens
        self.judge_sent_end = judge_sent_end

        self.use_fast_tokenizer = use_fast_tokenizer

        # Console-style IO helper used to drain the token stream in chat().
        self.chatio = SimpleChatIO(self.multiline)

        # Model and tokenizer — this is the expensive step the guard protects.
        self.model, self.tokenizer = load_model(
            model_path,
            device=self.device,
            num_gpus=self.num_gpus,
            max_gpu_memory=self.max_gpu_memory,
            dtype=self.dtype,
            load_8bit=self.load_8bit,
            cpu_offloading=self.cpu_offloading,
            revision=self.revision,
            debug=self.debug,
        )

        self.generate_stream_func = get_generate_stream_function(self.model, model_path)

        # Maximum context length derived from the model config.
        self.context_len = get_context_len_or_none(self.model) if False else get_context_length(self.model.config)

        # Current conversation; lazily (re)created in chat().
        self.conv = None
        self._initialized = True

    def chat(self, input):
        """Run one chat turn and return the model's reply as a string.

        Appends `input` (name kept for caller compatibility, although it
        shadows the builtin) to the current conversation, streams a
        completion, records it in the conversation history, and returns the
        stripped output. Returns '' if generation is interrupted.
        """
        # Start a fresh conversation when history is disabled or none exists.
        if not self.history or not self.conv:
            self.conv = new_chat()

        self.conv.append_message(self.conv.roles[0], input)
        self.conv.append_message(self.conv.roles[1], None)
        prompt = self.conv.get_prompt()

        gen_params = {
            "model": self.model_path,
            "prompt": prompt,
            "temperature": self.temperature,
            "repetition_penalty": self.repetition_penalty,
            "max_new_tokens": self.max_new_tokens,
            "stop": self.conv.stop_str,
            "stop_token_ids": self.conv.stop_token_ids,
            "echo": False,
        }

        try:
            output_stream = self.generate_stream_func(
                self.model,
                self.tokenizer,
                gen_params,
                self.device,
                context_len=self.context_len,
                judge_sent_end=self.judge_sent_end,
            )

            t = time.time()
            outputs = self.chatio.stream_output(output_stream)
            print('outputs in llm.vicuna', outputs, flush=True)

            duration = time.time() - t
            self.conv.update_last_message(outputs.strip())

            if self.debug:
                num_tokens = len(self.tokenizer.encode(outputs))
                msg = {
                    "conv_template": self.conv.name,
                    "prompt": prompt,
                    "outputs": outputs,
                    "speed (token/s)": round(num_tokens / duration, 2),
                }
                print(f"\n{msg}\n", flush=True)

            return outputs

        except Exception as e:
            # Best-effort recovery (e.g. KeyboardInterrupt-style aborts mid
            # generation): report, roll the conversation back, return ''.
            print(f"stopped generation: {str(e)}", flush=True)
            # If generation didn't finish, drop the empty assistant slot.
            if self.conv.messages[-1][1] is None:
                self.conv.messages.pop()
                # Remove last user message, so there isn't a double up
                if self.conv.messages[-1][0] == self.conv.roles[0]:
                    self.conv.messages.pop()

                reload_conv(self.conv)

            return ''
