# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Optional, Union, Callable, List, Dict

import trl
from accelerate import PartialState
from transformers import PreTrainedTokenizerBase


def _tokenize(batch: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizerBase") -> Dict[str, List[Any]]:
    """Tokenize a batch from a reward modelling dataset."""
    new_examples = {
        "input_ids_chosen": [],
        "attention_mask_chosen": [],
        "input_ids_rejected": [],
        "attention_mask_rejected": [],
    }
    for chosen, rejected in zip(batch["chosen"], batch["rejected"]):
        tokenized_chosen = tokenizer(chosen)
        tokenized_rejected = tokenizer(rejected)
        new_examples["input_ids_chosen"].append(tokenized_chosen["input_ids"])
        new_examples["attention_mask_chosen"].append(tokenized_chosen["attention_mask"])
        new_examples["input_ids_rejected"].append(tokenized_rejected["input_ids"])
        new_examples["attention_mask_rejected"].append(tokenized_rejected["attention_mask"])

    return new_examples


def _apply_chat_template(
    example,
    tokenizer: PreTrainedTokenizerBase,
    tools: Optional[List[Union[Dict, Callable]]] = None,
) -> Dict[str, str]:
    """Reshape a raw example into prompt/chosen/rejected form and apply the chat template.

    Expects ``example["_response"][0]`` to hold a (chosen, rejected) pair
    (TODO confirm against the upstream dataset schema); the pair is split into
    single-element ``chosen``/``rejected`` lists before delegating to
    ``trl.data_utils.maybe_apply_chat_template``.
    """
    response_pair = example["_response"][0]
    reshaped_example = {
        "prompt": example["_prompt"],
        "chosen": [response_pair[0]],
        "rejected": [response_pair[1]],
    }
    return trl.data_utils.maybe_apply_chat_template(tokenizer=tokenizer, example=reshaped_example, tools=tools)


def preprocess_reward_dataset(dataset, tokenizer, args):
    """Apply the chat template, tokenize, and length-filter a reward-model dataset.

    All map/filter steps run under ``main_process_first`` so the main process
    builds the dataset cache before the other processes read it. If ``dataset``
    is None it is returned unchanged (the context is still entered either way).
    """
    with PartialState().main_process_first():
        # Read max_length up front so a missing attribute fails fast, even for a None dataset.
        max_length = args.max_length
        if dataset is None:
            return dataset

        tokenizer_kwargs = {"tokenizer": tokenizer}
        dataset = dataset.map(_apply_chat_template, fn_kwargs=tokenizer_kwargs)
        dataset = dataset.map(
            _tokenize,
            fn_kwargs=tokenizer_kwargs,
            batched=True,
            num_proc=None,
        )

        # This filter is important because otherwise you get samples that exceed the model's context length and
        # get truncated => noisy signal the chosen/rejected label gets lost. The downside is that the
        # user might get surprised if N samples are missing from training.
        if max_length is not None:
            dataset = dataset.filter(
                lambda sample: len(sample["input_ids_chosen"]) <= max_length
                and len(sample["input_ids_rejected"]) <= max_length,
                num_proc=None,
            )
        return dataset
