# coding=utf-8
import torch
from transformers import AutoTokenizer, AutoModel
import os

# Local cache directory for downloaded Hugging Face weights and tokenizers.
cache_dir = '/home/yuhaiyang/llm/lm_cache'

# Model to chat with. NOTE(review): the original assigned 'hfl/rbt3' first
# and immediately overwrote it; that dead assignment has been removed.
model_name = 'THUDM/chatglm-6b'


# Load tokenizer and model. trust_remote_code=True is required because the
# ChatGLM repo ships its own custom modeling/tokenization code.
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir, trust_remote_code=True)
# Half precision keeps the 6B model within typical single-GPU memory.
model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir, trust_remote_code=True).half()
# BUG FIX: the original did model.to('cuda') unconditionally, which raises
# on machines without a CUDA device. Fall back to CPU instead of crashing.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)

print(model, tokenizer)


# Interactive chat REPL: each prompt is sent to the model together with the
# accumulated multi-turn history. Typing "stop" exits; "clear" resets the
# conversation history and clears the terminal.
history = []
print("starting")
while True:
    query = input("\n用户：")
    if query == "stop":
        break
    if query == "clear":
        history = []
        os.system('clear')
        continue
    # model.chat returns the reply text plus the updated history list.
    response, history = model.chat(tokenizer, query, history=history)
    print(f"chatGLM：{response}")
    # BUG FIX: original literal was '\n\注：...' — the stray backslash before
    # 注 is an invalid escape sequence, so it printed a literal '\' and emits
    # a SyntaxWarning on modern Python. Removed the backslash.
    print('\n注：输入stop停止，输入clear清除历史\n\n')
