import transformers
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
import os
import time

# Silence TensorFlow's C++ backend logging (2 = hide INFO and WARNING) and
# disable oneDNN custom ops. These must be set before TensorFlow is imported.
# NOTE(review): TF is not imported directly here — presumably transformers
# imports it lazily, which is why these are set up front; confirm if needed.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

print("[start]")

# Both paths point at the same local checkpoint; two names are kept to
# mirror the original pretrained-vs-finetuned layout.
pretrained_model_dir = r"/gdata/gpu8613/changeWeightTest"
finetuned_model_dir = r"/gdata/gpu8613/changeWeightTest"

tokenizer = LlamaTokenizer.from_pretrained(pretrained_model_dir)
# Load the weights directly in float16 and let accelerate place them on the
# available device(s). Passing torch_dtype/device_map to `pipeline()` has no
# effect when an already-instantiated model object is supplied, so the
# original code silently ran the model in float32 on CPU.
model = LlamaForCausalLM.from_pretrained(
    finetuned_model_dir,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Build the text-generation pipeline around the pre-loaded model/tokenizer.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
print("[model loaded]")
# Interactive loop: read a prompt, generate two sampled continuations,
# print them, and report wall-clock latency. The literal "123" is the
# sentinel input that ends the session.
while True:
    user_input = input("请输入字符串：")
    if user_input == "123":
        print("输入为 '123'，退出循环。")
        break

    # Time the full generation call (tokenize + generate + decode).
    start_time = time.time()
    sequences = pipeline(
        user_input,
        do_sample=True,
        # Sample only from the 10 most likely next tokens: trims the long
        # tail of unlikely words, at some cost in diversity if k is small.
        top_k=10,
        num_return_sequences=2,
        eos_token_id=tokenizer.eos_token_id,
        # LLaMA's tokenizer defines no pad token; reusing EOS avoids the
        # "Setting pad_token_id to eos_token_id" warning on every call.
        pad_token_id=tokenizer.eos_token_id,
        # The original used max_length=200, which counts the prompt tokens
        # too — a long prompt would leave no room for generation.
        # max_new_tokens bounds only the generated continuation.
        max_new_tokens=200,
    )
    eval_time = time.time() - start_time

    for seq in sequences:
        print(f"{seq['generated_text']}")

    print(f"total time for eval: {eval_time} s")

print("[end]")