from transformers import AutoTokenizer, AutoModel

from .llm_serve import LLMBase

# NOTE(review): appears unused in this module — chat() and model loading
# hardcode 25000 instead; confirm whether 8192 is stale or 25000 is wrong.
MAX_LENGTH = 8192


class ChatGLM3(LLMBase):
    """Singleton wrapper around a locally-loaded ChatGLM3 chat model.

    Every construction returns the same instance; the tokenizer and model
    are loaded exactly once, on the first construction.
    """

    _instance = None

    # Token window used for both model loading and generation.
    # (Kept at the original hardcoded value; module-level MAX_LENGTH differs.)
    GEN_MAX_LENGTH = 25000

    def __new__(cls, *args, **kwargs):
        # Classic singleton: hand back the one shared instance.
        if not cls._instance:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, model_path='', history=False):
        """Load tokenizer and model once.

        Args:
            model_path: HF-style path/identifier of the ChatGLM3 checkpoint.
            history: when True, conversation history accumulates across
                chat() calls; when False, history is cleared on every call.
        """
        # BUGFIX: __new__ returns the same object every time, so without this
        # guard each ChatGLM3(...) call would reload the tokenizer and model.
        if getattr(self, '_initialized', False):
            return
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(model_path, trust_remote_code=True,
                                               device_map='auto',
                                               max_length=self.GEN_MAX_LENGTH)
        self.model = self.model.eval()
        self.history = history
        self.history_messages = []
        self._initialized = True

    # Interact with the large language model.
    def chat(self, input):
        """Send *input* to the model and return its text response.

        History is retained between calls only when this instance was created
        with history=True; otherwise it is reset before each request.
        """
        if not self.history:
            self.history_messages = []
        # temperature=.1 / top_k=1 / top_p=.1 => near-deterministic decoding.
        gen_kwargs = {"max_length": self.GEN_MAX_LENGTH,
                      "temperature": .1, "top_k": 1, "top_p": .1}
        # BUGFIX: store the returned history on self — the original assigned it
        # to a local variable, so history mode never actually accumulated turns.
        response, self.history_messages = self.model.chat(
            self.tokenizer, input, history=self.history_messages, **gen_kwargs)
        return response


if __name__ == '__main__':
    # Smoke test: ask the model its name and print the reply.
    llm = ChatGLM3()
    answer = llm.chat('你的名字')
    print(answer, flush=True)
