# dialog.py
import torch
from transformers import AutoTokenizer, AutoModel

# Path to the local ChatGLM3-6B checkpoint (relative to the process working directory).
chat_model = "../model/chatglm3-6b"

class DialogModel:
    """Thin wrapper around a locally-stored ChatGLM3 chat model.

    Loads tokenizer + model from a local path and exposes a single
    `predict` method that maintains a Gradio-style (user, bot) tuple history.
    """

    def __init__(self, model_path=chat_model):
        """Load the tokenizer and model from *model_path*.

        trust_remote_code=True is required because ChatGLM3 ships its own
        modeling/tokenization code alongside the checkpoint.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        # Load on CPU first; move to GPU only if one is actually available.
        # (An unconditional .to("cuda") here would crash on CPU-only hosts
        # before the is_available() check below could ever run.)
        self.model = AutoModel.from_pretrained(model_path, trust_remote_code=True)

        if torch.cuda.is_available():
            # fp16 on GPU so the 6B model fits in typical VRAM
            self.model = self.model.half().cuda()
        else:
            self.model = self.model.float()
        self.model = self.model.eval()

    def predict(self, text, chat_history, do_sample=True, temperature=0.7, max_length=2500):
        """Generate a response to *text* and append the exchange to the history.

        Args:
            text: the new user message.
            chat_history: list of (user, assistant) tuples from previous turns
                (Gradio chatbot format); None is treated as an empty history.
            do_sample: sample stochastically (True) or decode greedily (False).
            temperature: sampling temperature forwarded to the model.
            max_length: maximum total token length forwarded to the model.

        Returns:
            The updated chat_history list with (text, response) appended.
        """
        if chat_history is None:
            # Guard: the original code crashed on .append when history was None.
            chat_history = []

        # Convert Gradio's (user, bot) tuple pairs into the role/content
        # message dicts that ChatGLM3's chat() API expects.
        chatglm_history = []
        for human, bot in chat_history:
            chatglm_history.append({"role": "user", "content": human})
            chatglm_history.append({"role": "assistant", "content": bot})

        # The model also returns its own updated history; we discard it and
        # keep maintaining the Gradio tuple-format history ourselves.
        response, _ = self.model.chat(
            self.tokenizer,
            text,
            history=chatglm_history,
            temperature=temperature,
            max_length=max_length,
            do_sample=do_sample,
        )

        # Update chat history for Gradio (tuple format for consistency).
        chat_history.append((text, response))
        return chat_history
