import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


# Path to the XTuner fine-tuned and merged sales-assistant checkpoint.
model_name_or_path = "/root/AI-Labs/InternLM/XTuner/ft-sales/merged"

# NOTE(review): the original passed device_map='cuda:0' to the tokenizer as
# well; tokenizers are device-agnostic and the kwarg is silently ignored by
# AutoTokenizer.from_pretrained, so it is dropped here.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # bf16 weights to halve GPU memory vs fp32
    device_map='cuda:0',
)
model = model.eval()  # inference mode: disables dropout etc.

# System persona prompt (Chinese). Roughly: "Your name is Wu Xian, a marketing
# person on the AI-Labs team and an experienced apparel marketer, expert in
# clothing design, outfit matching, sales, product inquiries and after-sales."
system_prompt = "你的名字叫伍鲜，是AI-Labs团队的营销人员，还是一名经验丰富的服装营销人员，精通服装设计、服饰搭配、服装销售、服装信息咨询、售后服务等各类问题。"
# InternLM chat history is a list of (query, response) tuples; the system
# prompt is injected as a first turn with an empty response — presumably the
# format the fine-tuned model expects; TODO confirm against training config.
messages = [(system_prompt, '')]

# messages = []  # alternative: start with an empty history (no persona)

# print("=============Welcome to InternLM chatbot, type 'exit' to exit.=============")

# Interactive chat REPL: type "exit" to quit.
while True:
    # FIX(review): the original did replace(' ', ''), which deletes *all*
    # spaces and mangles any multi-word input; strip() only trims the ends.
    input_text = input("\nUser  >>> ").strip()
    if input_text == "exit":
        break

    # Stream the reply: stream_chat yields (partial_response, history) pairs;
    # print only the newly generated suffix each iteration.
    length = 0
    history = messages  # fallback if the stream yields nothing
    for response, history in model.stream_chat(tokenizer, input_text, messages):
        if response is not None:
            print(response[length:], flush=True, end="")
            length = len(response)
    print()  # terminate the streamed line before the next prompt

    # FIX(review): persist the returned history so later turns keep
    # conversational context; the original discarded it every turn.
    messages = history

