# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import argparse
import random
from dataclasses import dataclass
from typing import List, Any, Optional, Union, Dict

import torch
from transformers import PreTrainedTokenizerBase, TrainerCallback
import numpy as np
from trl.trainer import reward_trainer

from openmind.flow.arguments import get_args
from openmind.flow.datasets import get_template, get_dataset_module
from openmind.flow.model import get_model, get_tokenizer
from openmind.utils import get_logger

logger = get_logger(__name__)


def setup_seed(seed):
    """Seed every relevant RNG (stdlib ``random``, NumPy, torch) for reproducibility.

    Args:
        seed: Integer seed applied to each generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


@dataclass
class RewardDataCollatorWithPadding:
    r"""
    Reward DataCollator that pads chosen/rejected example pairs to a fixed length.

    Every feature dict must contain ``input_ids_chosen``, ``attention_mask_chosen``,
    ``input_ids_rejected`` and ``attention_mask_rejected``. An optional ``margin``
    (detected on the first feature) is batched into a float tensor.

    Args:
        tokenizer (`PreTrainedTokenizerBase`):
            The tokenizer used for encoding the data; its ``pad`` method does the padding.
        args (`argparse.Namespace`):
            Runtime arguments; a truthy ``args.max_length`` sets the padded length,
            otherwise 1024 is used.
        padding (`Union[bool, str]`, `optional`, defaults to `True`):
            Kept for interface compatibility only — `__call__` always pads with the
            ``"max_length"`` strategy regardless of this field.
        pad_to_multiple_of (`int` or `None`, `optional`, defaults to `None`):
            If set will pad the sequence to a multiple of the provided value.
        return_tensors (`str`, `optional`, defaults to `"pt"`):
            The tensor type to use.
    """

    tokenizer: PreTrainedTokenizerBase
    args: argparse.Namespace
    padding: Union[bool, str] = True
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Collate tokenized chosen/rejected pairs into one padded batch.

        Args:
            features: Per-example dicts with the four required keys above and an
                optional ``margin``.

        Returns:
            Dict with padded ``input_ids_*`` / ``attention_mask_*`` tensors,
            ``return_loss=True`` and, when present, a float ``margin`` tensor.

        Raises:
            ValueError: If ``features`` is empty or an example misses a required key.
        """
        if not features:
            raise ValueError("`features` must contain at least one example.")

        required_keys = (
            "input_ids_chosen",
            "attention_mask_chosen",
            "input_ids_rejected",
            "attention_mask_rejected",
        )
        # Margin is optional; if the first example carries it, batch it as well.
        has_margin = "margin" in features[0]
        # Fall back to 1024 when args has no (truthy) max_length attribute.
        max_length = getattr(self.args, "max_length", None) or 1024

        features_chosen = []
        features_rejected = []
        margin = []
        for feature in features:
            # check if the keys are named as expected
            if not all(key in feature for key in required_keys):
                raise ValueError(
                    "The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`"
                )

            features_chosen.append(
                {
                    "input_ids": feature["input_ids_chosen"],
                    "attention_mask": feature["attention_mask_chosen"],
                }
            )
            features_rejected.append(
                {
                    "input_ids": feature["input_ids_rejected"],
                    "attention_mask": feature["attention_mask_rejected"],
                }
            )
            if has_margin:
                margin.append(feature["margin"])

        # Pad both sides to the same fixed length so chosen/rejected tensors align.
        batch_chosen = self.tokenizer.pad(
            features_chosen,
            padding="max_length",
            max_length=max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )
        batch_rejected = self.tokenizer.pad(
            features_rejected,
            padding="max_length",
            max_length=max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )
        batch = {
            "input_ids_chosen": batch_chosen["input_ids"],
            "attention_mask_chosen": batch_chosen["attention_mask"],
            "input_ids_rejected": batch_rejected["input_ids"],
            "attention_mask_rejected": batch_rejected["attention_mask"],
            "return_loss": True,
        }
        if has_margin:
            batch["margin"] = torch.tensor(margin, dtype=torch.float)
        return batch


def run_rm(
    callbacks: Optional[List["TrainerCallback"]] = None,
):
    """Run reward-model training end to end.

    Builds the tokenizer, model, dataset module and pairwise data collator from
    the global arguments, then trains with TRL's ``RewardTrainer`` when
    ``do_train`` is set.

    Args:
        callbacks: Optional trainer callbacks forwarded to the trainer.
    """
    cli_args = get_args()
    setup_seed(cli_args.seed)

    tokenizer = get_tokenizer()
    model = get_model()
    # NOTE(review): presumably consumed by the DDP wrapper (find_unused_parameters)
    # — confirm in the model setup code.
    model.find_unused_parameters = True

    dataset_module = get_dataset_module(tokenizer, get_template())

    reward_args = cli_args.reward_args
    # Keep the paired chosen/rejected columns: the collator consumes them, not the model.
    reward_args.remove_unused_columns = False

    collator = RewardDataCollatorWithPadding(tokenizer=tokenizer, args=cli_args)
    trainer = reward_trainer.RewardTrainer(
        model=model,
        args=reward_args,
        data_collator=collator,
        callbacks=callbacks,
        processing_class=tokenizer,
        **dataset_module,
    )

    if cli_args.do_train:
        logger.info_rank0("Start training.")
        result = trainer.train(resume_from_checkpoint=cli_args.resume_from_checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", result.metrics)
        trainer.save_metrics("train", result.metrics)
        trainer.save_state()
