from peft import LoraConfig, TaskType
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer, RewardConfig
import datasets

# Pairwise preference data (chosen/rejected pairs) used for reward modeling.
train_dataset = datasets.load_dataset("BrightXiaoHan/iflytech-heqc-preference", split="train")
eval_dataset = datasets.load_dataset("BrightXiaoHan/iflytech-heqc-preference", split="validation")
# Randomly subsample 1000 validation examples for a quicker eval pass.
# (Previous comment said 200, but the code selects 1000 — comment corrected to match.)
eval_dataset = eval_dataset.shuffle(seed=42).select(range(1000))

MODEL_NAME = "Qwen/Qwen2.5-1.5B"
OUTPUT_DIR = "./output"
LOG_DIR = "./logs"


# A reward model must emit a single scalar score per sequence. The default
# classification head has num_labels=2, which would make RewardTrainer compare
# 2-dim logit vectors instead of scalar rewards — so pin num_labels=1.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=1)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Qwen tokenizers ship without a dedicated pad token; reuse EOS for batch
# padding and mirror it on the model config so padded positions are handled.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id

# LoRA adapter settings for parameter-efficient training of the
# sequence-classification (reward) head: only low-rank updates are trained.
lora_settings = {
    "task_type": TaskType.SEQ_CLS,  # classification-style head on top of the LM
    "inference_mode": False,        # we are training, not serving
    "r": 8,                         # low-rank dimension
    "lora_alpha": 32,               # adapter scaling factor
    "lora_dropout": 0.1,
}
peft_config = LoraConfig(**lora_settings)

# Trainer hyperparameters for the reward-modeling run.
training_args = RewardConfig(
    output_dir=OUTPUT_DIR,
    num_train_epochs=1,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=1,
    learning_rate=1e-5,
    logging_dir=LOG_DIR,
    logging_steps=10,
    save_steps=1000,
    # NOTE(review): eval_steps has no effect here — eval_strategy="epoch" below
    # triggers evaluation once per epoch, not every N steps. Confirm which
    # cadence was intended and drop the redundant setting.
    eval_steps=1000,
    save_total_limit=1,
    eval_strategy="epoch",
    # Preference datasets carry extra columns (e.g. chosen/rejected) that the
    # trainer's collator needs, so keep them.
    remove_unused_columns=False,
    # NOTE(review): metric_for_best_model is inert without
    # load_best_model_at_end=True — verify whether best-checkpoint loading was
    # intended (it would also require matching save/eval strategies).
    metric_for_best_model="eval_loss",
    run_name="heqc_reward",
    bf16=True,
)

# Assemble the TRL reward trainer; passing peft_config makes it wrap the base
# model with the LoRA adapters before training begins.
trainer = RewardTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=tokenizer,
    peft_config=peft_config,
)

# Run pairwise preference optimization on the chosen/rejected pairs.
trainer.train()
