from chatllm.chatllm import ChatGML,Vicuna,LLaMABase,DevLLM,Guanaco,Parameters
from utils.contants import LLM_DEVICE


class ChatIO():
    """Container for a loaded LLM (model + tokenizer) that dispatches
    generation and (un)loading to the wrapper class matching the model's
    name/path (ChatGLM / Vicuna / Guanaco, or a DevLLM stub).
    """

    # Path or hub id of the base model; None when nothing is loaded.
    model_name_or_path: str = None
    # Optional LoRA adapter path; None when no adapter is applied.
    lora_model_name_or_path: str = None
    # Tokenizer instance, populated by the wrapper's load_model.
    tokenizer: object = None
    # Model instance, populated by the wrapper's load_model.
    model: object = None
    # When False, get_chatllm short-circuits to the DevLLM stub.
    deployMode = True
    # Target device string taken from project constants ("cuda", "cpu", ...).
    llm_device = LLM_DEVICE

    def generate(self, parameters: Parameters) -> str:
        """Generate a reply by delegating to the wrapper for the current model.

        Raises:
            ValueError: if no wrapper matches the current model path.
        """
        chatllm: LLaMABase = self.get_chatllm()
        if chatllm is None:
            # Fail loudly instead of the original AttributeError on None.
            raise ValueError(f'unsupported model: {self.model_name_or_path}')
        return chatllm.generate(parameters)

    def unload(self) -> None:
        """Drop all references to the loaded model/tokenizer so they can be
        garbage-collected (and GPU memory reclaimed by the framework)."""
        self.lora_model_name_or_path = None
        self.model_name_or_path = None
        self.model = None
        self.tokenizer = None

    def get_chatllm(self) -> LLaMABase:
        """Select the wrapper matching the current model name/path.

        Returns:
            A wrapper instance bound to this ChatIO, or None when
            ``deployMode`` is True and the path matches no known family.
        """
        if not self.deployMode:
            return DevLLM(self)
        # Hoisted: the original recomputed .lower() in every branch.
        name = self.model_name_or_path.lower()
        if 'chatglm' in name:
            return ChatGML(self)
        if 'vicuna' in name:
            return Vicuna(self)
        if 'guanaco' in name:
            return Guanaco(self)
        # Unrecognized model family: explicit None (original fell through
        # a silent `else: pass`).
        return None

    def load_model(self,
                 llm_device=LLM_DEVICE,
                 onload: object = None):
        """Load the model via the matching wrapper and move it to llm_device.

        Args:
            llm_device: target device string; stored on the instance.
            onload: opaque callback/handle forwarded to the wrapper's
                load_model — semantics defined by the wrapper (TODO confirm).

        Raises:
            ValueError: if no wrapper matches the current model path.
        """
        self.llm_device = llm_device
        chatllm: LLaMABase = self.get_chatllm()
        # BUG FIX: the original called chatllm.load_model(onload) BEFORE
        # checking `chatllm is not None`, so an unrecognized model path
        # crashed with an opaque AttributeError; the None guard below it
        # was dead for its apparent purpose.
        if chatllm is None:
            raise ValueError(f'unsupported model: {self.model_name_or_path}')
        chatllm.load_model(onload)
        if self.model is not None:
            self.model.to(llm_device)
            self.model.eval()