import gc
import logging
from itertools import chain

import torch
from openai import OpenAI
from transformers import AutoModelForCausalLM, AutoTokenizer

log = logging.getLogger(__name__)


def format_llama2_prompt(user_prompt, system_prompt):
    """Wrap *user_prompt* and *system_prompt* in the llama-2 chat template."""
    return (
        "<s>[INST] <<SYS>>\n"
        + system_prompt
        + "\n<</SYS>>\n\n"
        + user_prompt
        + " [/INST] "
    )


class ModelManager:
    """Owns the currently active language model.

    The model is either a local HuggingFace model (loaded onto a GPU or
    auto-placed via ``device_map="auto"``) or the name of an online model
    reached through an OpenAI-compatible API.
    """

    def __init__(self, args, config) -> None:
        """Create the API client and, unless debugging, load the initial model.

        args:   runtime options; uses ``.debug``, ``.model_path`` and ``.gpu``.
        config: provides ``base_url``, ``online_key``, ``model_paths`` and
                ``default_model``.
        """
        self.args = args
        self.base_url = config.base_url
        self.online_key = config.online_key
        self.client = OpenAI(api_key=self.online_key, base_url=self.base_url)
        # Initialise model/tokenizer/current/online to their empty state.
        self.unload_model()
        if not args.debug:
            if args.model_path:
                self.load_model(args.model_path)
            else:
                self.load_model(config.model_paths[config.default_model])

    @staticmethod
    def is_offline(model_path):
        """Return True for local model paths; the ``!online/`` prefix marks
        online models."""
        return not model_path.startswith("!online/")

    def load_model(self, model_path):
        """Load a local HF model, or record the name of an online model.

        Sets ``self.current`` to *model_path* on success; for online models
        also sets ``self.online`` (the tokenizer stays ``None``).
        """
        if self.is_offline(model_path):
            log.info(f'Loading model from "{model_path}"')
            self.tokenizer = AutoTokenizer.from_pretrained(
                model_path, trust_remote_code=True
            )
            if self.args.gpu >= 0:
                # Pin the whole model to one explicitly chosen GPU.
                model = AutoModelForCausalLM.from_pretrained(
                    model_path, torch_dtype="auto", trust_remote_code=True
                )
                dev = torch.device(f"cuda:{self.args.gpu}")
                self.model = model.to(dev)
            else:
                # Let accelerate choose device placement automatically.
                self.model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    device_map="auto",
                    torch_dtype="auto",
                    trust_remote_code=True,
                )
            self.model = self.model.eval()
        else:
            # NOTE(review): rpartition keeps only the last path segment, so
            # "!online/org/model" becomes "model" — confirm the API expects
            # that rather than "org/model" (removeprefix would keep it).
            self.model = model_path.rpartition("/")[2]
            self.online = True
            log.info(f"Using online model {self.model}")
        self.current = model_path

    def unload_model(self):
        """Drop any loaded model/tokenizer and reclaim host and GPU memory.

        Also used by ``__init__`` to establish the empty initial state.
        """
        self.model = None
        self.tokenizer = None
        self.current = None
        self.online = False
        gc.collect()
        # Only touch the CUDA allocator when CUDA actually exists; on
        # CPU-only builds an unconditional empty_cache() can raise.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def update_model(self, model_path):
        """Switch to *model_path*; a no-op when it is already current."""
        if model_path != self.current:
            self.unload_model()
            self.load_model(model_path)

    @torch.no_grad()
    def __call__(self, sys_prompt, prompt, max_new_tokens, msg_history):
        """Generate a completion for *prompt*.

        Dispatches to the online API when an online model is active,
        otherwise runs local sampling. Returns ``(text, new_msg_history)``;
        the history is currently only maintained for online models.
        """
        if self.online:
            gen, new_msgs = self.online_call(
                sys_prompt, prompt, max_new_tokens, msg_history
            )
        else:
            if sys_prompt == "":
                # No system prompt: send the raw user text, untemplated.
                p = prompt.strip()
            else:
                p = format_llama2_prompt(prompt.strip(), sys_prompt)
            input_ids = self.tokenizer.encode(p, return_tensors="pt").to(
                self.model.device
            )
            output = self.model.generate(
                input_ids, do_sample=True, max_new_tokens=max_new_tokens
            )
            gen = self.tokenizer.decode(output[0], skip_special_tokens=True)
            # Strip the echoed prompt: prefer the llama-2 instruction marker,
            # else drop everything up to and including the raw prompt text.
            # (Fix: the "[/INST]" search previously ran twice — once into an
            # unused variable, once via the walrus.)
            if (inst_end := gen.find("[/INST]")) != -1:
                gen = gen[inst_end + len("[/INST]") :].strip()
            elif (input_end := gen.find(prompt)) != -1:
                gen = gen[input_end + len(prompt) :].strip()
            new_msgs = []  # TODO: implement offline message histories

        return gen.strip(), new_msgs

    def online_call(self, sys_prompt, query, max_new_tokens, message_history: list):
        """Send *query* to the online chat-completions API.

        Returns ``(reply_text, updated_history)``. On an API error the reply
        is an ``"Error:..."`` string and the *original* history is returned
        unchanged so the caller can retry.
        """
        assert self.online, f"Model {self.model} is not online."
        assert isinstance(self.model, str), f"Model ({self.model}) is not a string."
        new_messages = [{"role": "system", "content": sys_prompt}] if sys_prompt else []
        if message_history:
            # Drop any stale system message from the history; the fresh
            # sys_prompt (if any) replaces it.
            new_messages.extend(
                m for m in message_history if m["role"] != "system"
            )
        new_messages.append({"role": "user", "content": query})

        # call the API
        resp = self.client.chat.completions.create(
            model=self.model,
            messages=new_messages,
            max_tokens=max_new_tokens,
        )

        # check for errors
        if isinstance(resp, str):
            log.error(resp)
            return f"Error:\n\n{resp}", message_history
        if resp.choices is None:
            log.error(resp)
            # NOTE(review): assumes the error response carries an ``error``
            # attribute with a ``message`` key — confirm against the
            # provider actually behind base_url.
            return f"Error:\n\n{resp.error['message']}", message_history

        resp_text = resp.choices[0].message.content
        new_messages.append({"role": "assistant", "content": resp_text})
        return resp_text, new_messages
