# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
# https://github.com/hiyouga/LLaMA-Factory/blob/main/src/llamafactory/data/processors/supervised.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Dict, Any
from collections import defaultdict

from openmind.archived.models.auto import AutoTokenizer
from openmind.utils import get_logger
from openmind.flow.datasets.template import Template

logger = get_logger(__name__)  # pylint: disable=invalid-name


def preprocess_pairwise_dataset(
    examples, template: Template, tokenizer: AutoTokenizer, cutoff_len: int
) -> Dict[str, List[Any]]:
    """Split paired responses into prompt/chosen/rejected columns for preference training.

    Each valid example must have an odd number of prompt turns (history pairs plus the
    final user turn) and exactly one response entry, where that entry holds the
    (chosen, rejected) pair at positions 0 and 1. Invalid examples are logged and dropped.

    NOTE(review): ``template``, ``tokenizer`` and ``cutoff_len`` are accepted for
    interface compatibility but unused in this body — confirm against callers.

    Args:
        examples: Batched columns with ``_prompt`` and ``_response`` lists.
        template: Chat template (unused here).
        tokenizer: Tokenizer (unused here).
        cutoff_len: Maximum sequence length (unused here).

    Returns:
        Mapping with ``prompt``, ``chosen`` and ``rejected`` lists of equal length.
    """
    outputs = defaultdict(list)
    prompts = examples["_prompt"]
    responses = examples["_response"]
    for idx, prompt in enumerate(prompts):
        pair = responses[idx]
        # Guard: prompt turn count must be odd and exactly one response entry present.
        if len(prompt) % 2 == 0 or len(pair) != 1:
            logger.warning("Dropped invalid example: {}".format(prompt + pair))
            continue
        outputs["prompt"].append(prompt)
        # The single response entry packs (chosen, rejected) at indices 0 and 1.
        outputs["chosen"].append([pair[0][0]])
        outputs["rejected"].append([pair[0][1]])

    return outputs
