# Import the required libraries
import torch
from modelscope import AutoTokenizer, AutoModel
from peft import PeftModel, PeftConfig

# Paths: the local ModelScope cache of the ChatGLM3-6B base model, and the
# directory holding the fine-tuned LoRA adapter weights.
model_dir = "C:\\Users\\16014\\.cache\\modelscope\\hub\\models\\ZhipuAI\\chatglm3-6b"
peft_model_id = "./lora_saver/lora_query_key_value"

# Load tokenizer and base model (fp16, moved to GPU).
# NOTE(review): torch.no_grad() around from_pretrained has no real effect —
# loading builds no autograd graph; gradient suppression matters at inference
# time (model.generate is itself decorated with no_grad in transformers).
with torch.no_grad():
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()

# Wrap the base model with the LoRA adapter and switch to inference mode
# (disables dropout etc.).
model = PeftModel.from_pretrained(model, peft_model_id)
model.eval()

# Interactive multi-turn chat loop; the user types "~" to quit.
history = []
role = "user"

# Stop generation at end-of-sequence, or when the model begins a new
# user/observation turn. These settings are loop-invariant, so build them
# once instead of on every iteration.
eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), tokenizer.get_command("<|observation|>")]
gen_kwargs = {"max_length": 1200, "num_beams": 1, "do_sample": True, "top_p": 0.8, "temperature": 0.8}

while True:
    # Read the user's question from the keyboard.
    query = input("请输入您的问题：")

    # Exit the conversation when the user enters "~".
    if query.lower() == "~":
        break

    # Build the chat prompt from the new query plus the accumulated history.
    inputs = tokenizer.build_chat_input(query, history=history, role=role)
    inputs = inputs.to('cuda')

    # Generate, then keep only the newly produced tokens: skip the prompt
    # prefix and drop the trailing stop token.
    outputs = model.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
    outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):-1]

    # Decode the generated token ids back into text.
    response = tokenizer.decode(outputs)

    # BUGFIX: record the user's turn before process_response appends the
    # assistant's reply, mirroring ChatGLM3's reference chat() method.
    # Without this, every later prompt silently loses all prior user messages.
    history.append({"role": role, "content": query})

    # Strip special markers from the raw response and append the assistant
    # turn to the history (process_response is forwarded through PeftModel
    # to the underlying ChatGLM3 model).
    response, history = model.process_response(response, history)

    # Print the model's reply.
    print("模型响应：", response)