import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Single source of truth for the model location so the tokenizer, weights,
# and generation config are always loaded from the same checkpoint.
MODEL_DIR = '../models/12B'

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

# Prefer GPU when one is present; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 is only well-supported on GPU: many CPU ops have no fp16 kernel and
# the ones that do are very slow, so use float32 when running on CPU.
dtype = torch.float16 if device == "cuda" else torch.float32

model = AutoModelForCausalLM.from_pretrained(
    MODEL_DIR,
    trust_remote_code=True,
    torch_dtype=dtype,
)
model.to(device)
model.eval()  # inference only: disable dropout / train-mode behavior

generate_config = GenerationConfig.from_pretrained(MODEL_DIR)

question = "你好！你是谁？"
# `chat` is a custom method provided by the model's remote code
# (trust_remote_code=True). With stream=True it yields (answer, history)
# pairs as generation progresses — presumably `answer` is the text produced
# so far; verify against the model's own README/remote code.
gen = model.chat(
    tokenizer=tokenizer,
    question=question,
    history=[],
    generation_config=generate_config,
    stream=True,
)
for answer, history in gen:
    print(answer)