import os

import torch
from transformers import AutoTokenizer, AutoModel

from configs.load_configs import ConfigLoader


class LoadChatModel(object):
    """Wrapper around a HuggingFace chat model with sync and streaming helpers.

    Call load() once before chat()/stream_chat(); until then both
    ``model`` and ``tokenizer`` are None.
    """

    def __init__(self):
        # Populated by load(); kept as None so callers can detect an
        # unloaded model.
        self.tokenizer = None
        self.model = None

    # Load the model and tokenizer
    def load(self):
        """Load the tokenizer and model from paths in the chat config.

        The tokenizer path may be overridden via the TOKENIZER_PATH
        environment variable; it defaults to the model path.
        """
        config = ConfigLoader.load_chat_config()
        # Model and tokenizer paths
        model_path = config["model_path"]
        tokenizer_path = os.environ.get("TOKENIZER_PATH", model_path)
        # Initialize the tokenizer and the model
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path, trust_remote_code=True)
        model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
        # Use fp16 on GPU when available; fall back to fp32 on CPU instead
        # of crashing (an unconditional .half().cuda() fails on machines
        # without CUDA).
        if torch.cuda.is_available():
            model = model.half().cuda()
        self.model = model.eval()

    # Synchronous (blocking) response
    def chat(self, query, history=None):
        """Return the model's full response for *query* (blocking).

        ``history`` is optional for backward compatibility; when given it
        is forwarded to the underlying model's chat call.
        """
        if history is None:
            return self.model.chat(self.tokenizer, query)
        return self.model.chat(self.tokenizer, query, history=history)

    # Streaming response
    def stream_chat(self, query, history, callback_function=None):
        """Stream the model's response incrementally.

        Each newly generated text fragment is passed to
        ``callback_function`` (if provided). Returns ``(response, history)``
        with the final full response and updated conversation history.
        """
        past_key_values = None
        current_length = 0
        response = ""

        # Let the model generate the response step by step
        for response, history, past_key_values in self.model.stream_chat(
                self.tokenizer,
                query,
                history=history,
                top_p=1,  # nucleus-sampling cap; higher values allow more diverse output
                temperature=0.01,  # very low temperature keeps generation nearly deterministic
                past_key_values=past_key_values,  # attention cache carried between generation steps
                return_past_key_values=True  # ask the model to yield the updated cache
        ):
            # Emit only the newly generated suffix since the previous step
            delta = response[current_length:]
            current_length = len(response)
            if callback_function:
                callback_function(delta)

        return response, history
