from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda:0"  # the device to load the model onto
# NOTE(review): `device` is declared but never used below — the model is placed
# via device_map="auto" and chat() moves inputs with a hardcoded .to('cuda');
# confirm whether this constant is still needed.

# Local path to a fine-tuned Llama-3-8B checkpoint (path suggests LoRA
# training output — TODO confirm it is a merged/loadable checkpoint).
pretrained_model = r"D:/codes/llm_about/self-llm/zzzzz_train/llama38B/output/llama3_8b_lorad"

model = AutoModelForCausalLM.from_pretrained(
    pretrained_model,
    torch_dtype="auto",  # infer dtype from the checkpoint config
    device_map="auto"    # let accelerate place weights on available devices
)

tokenizer = AutoTokenizer.from_pretrained(pretrained_model)


def chat(text: str) -> str:
    """Generate a short product name (abbreviation) for *text* with the model.

    Args:
        text: Full product name to abbreviate.

    Returns:
        The model's generated completion, decoded with special tokens removed.
    """
    prompt = f"请针对以下商品名称生成商品简称，并直接给出答案 {text}"
    messages = [
        {"role": "system", "content": "你现在是一个商品名称简称生成机器人"},
        {"role": "user", "content": prompt}
    ]

    # tokenize=False returns the chat-formatted prompt as a *string*
    # (the original misleadingly named this `input_ids`).
    prompt_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Move inputs to the model's own device rather than a hardcoded 'cuda',
    # so this stays correct under device_map="auto" placement.
    model_inputs = tokenizer([prompt_text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512)
    # Slice the prompt tokens off the front of each output row so only the
    # newly generated tokens are decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]


if __name__ == '__main__':
    # Interactive loop: keep asking for product names until "exit" is entered.
    while (query := input("请输入商品名称：").strip()) != "exit":
        print(chat(query))
