import os
import torch
import datasets
from transformers import AutoTokenizer
from peft import PeftModel
from trl.models import AutoModelForCausalLMWithValueHead

# Preference dataset used to train/evaluate the reward model.
_DATASET_NAME = "BrightXiaoHan/iflytech-heqc-preference"

train_dataset = datasets.load_dataset(_DATASET_NAME, split="train")
# Deterministically shuffle and keep 200 validation examples for a quick smoke test.
eval_dataset = (
    datasets.load_dataset(_DATASET_NAME, split="validation")
    .shuffle(seed=42)
    .select(range(200))
)

MODEL_NAME = "Qwen/Qwen2.5-1.5B"
OUTPUT_DIR = "./output"
# Single source of truth for the checkpoint directory: it holds both the
# value-head weights and the PEFT adapter (previously duplicated inline).
CHECKPOINT = os.path.join(OUTPUT_DIR, "checkpoint-18000")


tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Load the reward model (base causal LM + value head), then attach the
# PEFT/LoRA adapter saved in the same checkpoint directory.
model = AutoModelForCausalLMWithValueHead.from_pretrained(CHECKPOINT)
model = PeftModel.from_pretrained(model, CHECKPOINT)

# Inference mode; fall back to CPU so the script still runs without a GPU.
model.eval()
model.to("cuda" if torch.cuda.is_available() else "cpu")

# Score a single hand-written example with the reward model.
with torch.no_grad():
    prompt = "情景描述:\n微信聊天，高情商沟通\n我有一句话想对朋友说，但是不知道怎么表达。请帮我优化表达。\n```\n感觉朋友好傻\n```\n请直接输出优化后的表达，不要有解释，标签和额外的内容\n朋友，你咋这么“二”呢😉 就像个天然呆一样，老是干些傻兮兮的事儿。"

    # Tokenize on the same device the model lives on (works for CPU or GPU).
    device = next(model.parameters()).device
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # AutoModelForCausalLMWithValueHead.forward returns (lm_logits, loss, value);
    # the scalar reward is the value-head output at the final token position.
    _lm_logits, _loss, value = model(**inputs)
    reward = value[0, -1].item()
    print(f"reward: {reward}")
