"""Smoke-test script: load a local 12B chat model and answer one fixed question.

Loads the tokenizer, model weights (fp16), and generation config from
``../models/12B`` and runs a single non-streaming chat turn, printing the
model's answer to stdout.
"""

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Directory holding the tokenizer, weights, and generation_config.json.
MODEL_DIR = "../models/12B"


def main() -> None:
    """Load the model from MODEL_DIR and print the answer to one question."""
    # trust_remote_code is required: this checkpoint ships a custom model
    # class exposing the non-standard `chat` API used below.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_DIR, trust_remote_code=True, torch_dtype=torch.float16
    )

    # Prefer GPU when available; fall back to CPU as the original did.
    # NOTE(review): fp16 on CPU works for this path but is slow — confirm
    # acceptable, or cast to float32 for CPU runs.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    model.eval()  # inference only; disables dropout etc.

    generate_config = GenerationConfig.from_pretrained(MODEL_DIR)

    question = "你好！你是谁？"
    # `chat` returns (answer, updated_history); history is unused here.
    answer, _history = model.chat(
        tokenizer=tokenizer,
        question=question,
        history=[],
        generation_config=generate_config,
        stream=False,
    )
    print(answer)


if __name__ == "__main__":
    main()
