import torch
from datasets import Dataset
import json

from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
from modelscope import AutoTokenizer, BitsAndBytesConfig, AutoModelForSequenceClassification
from trl import RewardTrainer, RewardConfig
import config

# --- Tokenizer setup ------------------------------------------------------
model_path = config.model_name
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
# Right padding: the sequence-classification head reads a pooled/last-token
# representation, and reward training pads chosen/rejected pairs on the right.
tokenizer.padding_side = "right"
# The base model has no dedicated pad token; reuse EOS so batching works.
tokenizer.pad_token = tokenizer.eos_token

# --- Base model in 4-bit (QLoRA-style) ------------------------------------
# NF4 quantization with double quantization; compute in fp16.
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16)
# num_labels=1: a single scalar output used as the reward score.
model = AutoModelForSequenceClassification.from_pretrained(model_path, num_labels=1, quantization_config=bnb_config)
# Keep the model's pad id in sync with the tokenizer so padded positions
# are handled correctly by the classification head.
model.config.pad_token_id = tokenizer.pad_token_id

# --- LoRA adapters on the attention projections ---------------------------
# r=2 is a very small rank; alpha=16 gives an effective scale of alpha/r=8.
peft_config = LoraConfig(r = 2, target_modules=["q_proj", "v_proj", "k_proj"], task_type=TaskType.SEQ_CLS, lora_alpha=16, lora_dropout=0.05)
# Required prep for k-bit training (casts norms/head to fp32, enables
# gradient checkpointing-compatible input grads).
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()


# Load the preference data: a JSONL file where every line is one JSON object
# expected to carry "question", "chosen" and "rejected" fields.
with open("./custom_data/preference.json", 'r', encoding='utf-8') as f:
    items = [json.loads(line) for line in f]
dataset = Dataset.from_list(items)

def process_func(example):
    """Tokenize one preference record into the pairwise format RewardTrainer expects.

    The prompt ("question") is prepended to both the preferred ("chosen")
    and the dispreferred ("rejected") completion, and each concatenation is
    tokenized independently.
    """
    prompt = example["question"]
    enc_chosen = tokenizer(prompt + example["chosen"])
    enc_rejected = tokenizer(prompt + example["rejected"])

    # Field names follow the RewardTrainer data contract.
    return {
        "input_ids_chosen": enc_chosen["input_ids"],
        "attention_mask_chosen": enc_chosen["attention_mask"],
        "input_ids_rejected": enc_rejected["input_ids"],
        "attention_mask_rejected": enc_rejected["attention_mask"],
    }

# Replace the raw columns with the tokenized pairwise fields.
dataset = dataset.map(process_func, remove_columns=dataset.column_names)
print(dataset)

# Named `reward_args` (not `config`) so the `config` module imported at the
# top of the file is not shadowed by this assignment.
reward_args = RewardConfig(output_dir="model/reward_config", num_train_epochs=12, per_device_train_batch_size=16)
# NOTE(review): eval_dataset is the training set itself, so eval metrics only
# measure memorization — consider a held-out split.
reward_trainer = RewardTrainer(model=model, args=reward_args, train_dataset=dataset, eval_dataset=dataset, processing_class=tokenizer)
reward_trainer.train()

# Persist the trained adapter/model for downstream RLHF use.
reward_trainer.save_model("model/reward_model")