import argparse
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, is_torch_npu_available


def parse_args():
    """Parse command-line arguments for the evaluation script.

    Returns:
        argparse.Namespace with a single attribute, ``model_name_or_path``.
    """
    parser = argparse.ArgumentParser(description="Eval the LLM model")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Path to model",
    )
    return parser.parse_args()


def build_prompt(tokenizer, input):
    """Wrap an instruction in the Alpaca-style prompt template and tokenize it.

    Args:
        tokenizer: A callable tokenizer; invoked as ``tokenizer(prompt,
            return_tensors="pt")`` so it is expected to return a BatchEncoding
            of PyTorch tensors.
        input: The instruction text to embed in the template.
            NOTE: the name shadows the ``input`` builtin; kept for interface
            compatibility with existing callers.

    Returns:
        Whatever ``tokenizer`` returns for the rendered prompt (with
        ``return_tensors="pt"``).
    """
    # Fixed typos from the original ("instrunction", "requrests") so the text
    # matches the standard Alpaca instruction template the model was tuned on.
    prompt = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        f"### Instruction:\n{input}\n\n### Response:"
    )
    return tokenizer(prompt, return_tensors="pt")


def main():
    """Load a causal LM, run one hard-coded instruction through it, and print
    the decoded generation.

    Side effects: reads the model/tokenizer from ``--model_name_or_path`` and
    writes the generated text to stdout.
    """
    args = parse_args()

    # Pick the best available accelerator: Ascend NPU, then CUDA, then CPU.
    if is_torch_npu_available():
        device = "npu:0"
    elif torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"

    # Follow each model's own inference example; remember to run the
    # computation on `device`.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=False)
    # Fixes: the original called `from_pretrianed` (typo -> AttributeError) and
    # passed an unsupported `device=` kwarg; move the model with .to() instead.
    model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
    model.to(device)
    model.eval()  # disable dropout etc. for inference

    inputs = build_prompt(tokenizer, "Give three tips for staying healthy.")
    inputs = inputs.to(model.device)

    with torch.no_grad():  # no gradients needed for generation
        pred = model.generate(**inputs, max_new_tokens=512, repetition_penalty=1.1)
    print()
    print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
    print()


if __name__ == "__main__":
    # The original file defined main() but never invoked it.
    main()