# chatglm-6b-int4 / load_model.py
# Source: Hugging Face repo (author: NewBreaker, commit 3f712ba, "auto git", 380 bytes)
"""Smoke-test script: load the local ChatGLM-6B-int4 checkpoint and run one chat turn.

Expects the tokenizer/model files (with their custom modeling code) to live in the
current directory; `trust_remote_code=True` is required because ChatGLM ships its
own model classes inside the checkpoint repo.

NOTE: trust_remote_code executes arbitrary Python from the checkpoint directory —
only run this against a checkpoint you trust.
"""
from transformers import AutoTokenizer, AutoModel

# "." (not ".\\") so the path resolves on POSIX systems as well as Windows.
MODEL_DIR = "."

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
# Append .float() here to force fp32 if running on a CPU without half-precision support.
model = AutoModel.from_pretrained(MODEL_DIR, trust_remote_code=True)
model = model.eval()  # inference mode: disables dropout etc.

# Single-turn sanity check ("你好" = "Hello"); history starts empty.
response, history = model.chat(tokenizer, "你好", history=[])
print("response:", response)