# train reward model
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset, concatenate_datasets
from transformers import HfArgumentParser, AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer, RewardConfig

"""
数据集

中文 liyucheng/zhihu_rlhf_3k

"""

@dataclass
class TrainConfig:
    """Command-line configuration for reward-model training.

    Attributes:
        model_name_or_path: Hugging Face hub id or local path of the base
            checkpoint to initialize the reward model from.
    """

    # FIX: was annotated Optional[str], but the default is a concrete str
    # and None is never a valid value for a checkpoint path.
    model_name_or_path: str = field(
        default="deepseek-ai/deepseek-llm-7b-base",
        metadata={"help": "Model checkpoint for weights initialization."},
    )


def main(args: TrainConfig):
    """Train a pairwise reward model on combined EN + CN HH-RLHF data.

    Args:
        args: Parsed CLI configuration; ``model_name_or_path`` selects the
            base checkpoint to fine-tune.

    Side effects:
        Downloads datasets/model weights and writes checkpoints under
        ``./models/deepseek-ll-7b-reward``.
    """
    en_dataset = load_dataset('statsmind/hh_rlhf_en')
    cn_dataset = load_dataset('statsmind/hh_rlhf_cn')
    # BUG FIX: load_dataset returns a DatasetDict; concatenate_datasets only
    # accepts plain Dataset objects, so concatenate split by split.
    train_dataset = concatenate_datasets(
        [en_dataset['train'], cn_dataset['train']])
    eval_dataset = concatenate_datasets(
        [en_dataset['test'], cn_dataset['test']])

    # BUG FIX: honor the CLI argument instead of a hard-coded path (the old
    # hard-coded "models/deepseek-ll-7b-instruct" also looks like a typo of
    # "deepseek-llm" and silently overrode --model_name_or_path).
    model = AutoModelForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        num_labels=1,  # reward model outputs a single scalar score
        device_map='auto',
        torch_dtype=torch.bfloat16,
    )
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    if tokenizer.pad_token is None:
        # RewardTrainer pads chosen/rejected pairs; LLaMA-style tokenizers
        # often ship without a pad token, so fall back to EOS.
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = tokenizer.eos_token_id

    training_args = RewardConfig(
        output_dir='./models/deepseek-ll-7b-reward',
        # BUG FIX: the model is loaded in bfloat16, so mixed precision must
        # be bf16; fp16=True with bf16 weights fails at runtime.
        bf16=True,
    )

    trainer = RewardTrainer(
        model=model,
        args=training_args,
        processing_class=tokenizer,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )

    trainer.train()


if __name__ == "__main__":
    parser = HfArgumentParser((TrainConfig))
    train_args = parser.parse_args_into_dataclasses()[0]
    main(train_args)
