# Fixed: line 1 was garbled ("f5   rom datasets import ...") — a SyntaxError
# that prevented the script from running at all.
from datasets import load_dataset
import evaluate
# from datasets import load_dataset, load_metric
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    GPT2Tokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding,
)
import numpy as np

# from peft import prepare_model_for_kbit_training
# from peft import LoraConfig, get_peft_model

def tokenize(examples):
    """Batch tokenization callback for ``Dataset.map(..., batched=True)``.

    Truncates each review to 1024 tokens and deliberately does NOT pad here:
    ``DataCollatorWithPadding`` pads every batch dynamically at collation
    time, so padding during ``map`` would only waste memory. This now mirrors
    the inline lambda actually passed to ``ds.map`` below, so either callable
    can be used interchangeably.

    Args:
        examples: a batch dict from ``datasets`` containing a "text" column.

    Returns:
        The tokenizer's encoding dict (input_ids, attention_mask, ...).
    """
    # `tokenizer` is the module-level tokenizer loaded later in this script.
    return tokenizer(examples["text"], truncation=True, max_length=1024)


# def compute_metrics(eval_preds):
#     metric = evaluate.load('accuracy')
#     logits, labels = eval_preds
#     predictions = np.argmax(logits, axis=-1)
#     return metric.compute(predictions=predictions, references=labels)


# Local path to a pretrained OPT-350m checkpoint (weights + tokenizer files).
model_name = r"/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/models/opt-350m"

# Sequence-classification head with 2 labels (binary sentiment on IMDb).
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

tokenizer = AutoTokenizer.from_pretrained(model_name)
# Reuse EOS as the padding token — the usual workaround for decoder-style
# tokenizers that ship without a dedicated pad token.
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.pad_token)
print(tokenizer.eos_token)
# tokenizer.padding_side = "left"
tokenizer.padding_side = 'right'
# Keep the model config consistent with the tokenizer's pad-token choice.
model.config.pad_token_id = model.config.eos_token_id

# IMDb movie-review dataset loaded from a local on-disk copy.
ds = load_dataset(r"/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/datasets/IMDb_movie_reviews")
# tokenized_ds = ds.map(tokenize, batched=True)
# Tokenize without padding here; DataCollatorWithPadding pads per batch later.
tokenized_ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, max_length=1024), batched=True)

print(tokenized_ds["train"][0])

# Trainer hyper-parameters and checkpointing policy.
training_args = TrainingArguments(
    num_train_epochs=1,
    output_dir=r"/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/training/step2_reward_model_finetuning/imdb_rm",
    # push_to_hub=True,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    evaluation_strategy="epoch",
    save_strategy="steps",  # checkpoint by step count rather than per epoch
    save_steps=10000,  # write a checkpoint every 10,000 steps
    save_total_limit=1,  # keep only the newest checkpoint; older ones are deleted automatically
    logging_dir="./run",  # TensorBoard log directory
)

# Pads each batch dynamically to its longest sequence at collation time.
data_collator = DataCollatorWithPadding(tokenizer)

trainer = Trainer(
    model=model,
    tokenizer=tokenizer,
    data_collator=data_collator,
    args=training_args,
    train_dataset=tokenized_ds["train"],
    eval_dataset=tokenized_ds["test"],
    # compute_metrics=compute_metrics,
)

trainer.train()
# NOTE(review): this save path drops the "fcrlhf" directory segment used by
# every other path in this script — confirm it is the intended destination.
trainer.save_model(r"/root/autodl-tmp/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/imdb_reward_model")
print("training complete")
print(trainer.model)
# trainer.push_to_hub()