# import torch_npu
from transformers import AutoTokenizer
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the tokenizer and model from a local checkpoint directory.
model_dir='../torch_model'
tokenizer = T5Tokenizer.from_pretrained(model_dir)
# Half precision reduces memory use (roughly 3 GB for this checkpoint).
# NOTE(review): .half() on a CPU-only setup can break generate() for some ops — confirm a GPU/NPU is used.
model = T5ForConditionalGeneration.from_pretrained(model_dir).half()
print('finished loading model')


def preprocess(text):
  """Escape literal newlines and tabs so the prompt stays single-line."""
  for raw, escaped in (("\n", "\\n"), ("\t", "\\t")):
    text = text.replace(raw, escaped)
  return text

def postprocess(text):
  """Undo preprocess() escaping and expand '%20' to two spaces."""
  substitutions = (("\\n", "\n"), ("\\t", "\t"), ("%20", "  "))
  for old, new in substitutions:
    text = text.replace(old, new)
  return text

def answer(text, sample=True, top_p=0.9, temperature=0.7, context = ""):
    """Generate one chat reply for `text`, given prior `context`.

    sample: whether to sample (True suits open-ended generation);
    top_p: nucleus-sampling threshold in (0, 1] — larger means more diverse output.
    Returns the decoded, de-escaped model reply.
    """
    prompt = preprocess(f"{context}\n用户：{text}\n小元：".strip())
    encoding = tokenizer(
        text=[prompt],
        truncation=True,
        padding=True,
        max_length=1024,
        return_tensors="pt",
    )
    # Shared generation settings; branch-specific ones are merged below.
    gen_kwargs = dict(
        return_dict_in_generate=True,
        output_scores=False,
        max_new_tokens=1024,
    )
    if sample:
        gen_kwargs.update(
            do_sample=True,
            top_p=top_p,
            temperature=temperature,
            no_repeat_ngram_size=12,
        )
    else:
        gen_kwargs.update(num_beams=1, length_penalty=0.6)
    out = model.generate(**encoding, **gen_kwargs)
    decoded = tokenizer.batch_decode(out["sequences"], skip_special_tokens=True)
    return postprocess(decoded[0])

# Multi-turn conversation REPL: keep the last 5 exchanges as context.
history = []
while True:
    query = input("\n用户：")
    reply = answer(query, context="\n".join(history[-5:]))
    history.append(f"用户：{query}\n小元：{reply}")
    print(f"小元：{reply}")


