# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

from openmind.utils.constants import IGNORE_INDEX


def preprocess_sp_dataset(seq_ids, world_size):
    """Split one padded sequence into equal contiguous chunks, one per SP rank.

    Args:
        seq_ids: sliceable sequence (e.g. list of token ids) to split.
        world_size: number of sequence-parallel ranks; must evenly divide
            ``len(seq_ids)`` so every rank receives a chunk of the same length.

    Returns:
        A list of exactly ``world_size`` contiguous slices of ``seq_ids``.

    Raises:
        ValueError: if ``world_size`` is not positive, or ``len(seq_ids)`` is
            not divisible by ``world_size``. (The previous implementation
            silently emitted a ragged extra chunk in the non-divisible case,
            which desynchronizes the per-rank row counts in ``sp_split``.)
    """
    if world_size <= 0:
        raise ValueError(f"world_size must be positive, got {world_size}")
    step, remainder = divmod(len(seq_ids), world_size)
    if remainder:
        raise ValueError(
            f"Sequence length {len(seq_ids)} is not divisible by world_size {world_size}"
        )
    return [seq_ids[i * step : (i + 1) * step] for i in range(world_size)]


def pad_sequence(examples, tokenizer, max_length, ignore_pad_token_for_loss):
    """Right-pad every token-sequence field of ``examples`` to ``max_length``.

    Mutates ``examples`` in place (rows are extended, label rows are shifted)
    and also returns it for convenience.

    Args:
        examples: mapping from feature name to a list of rows (lists of ints).
            Recognized name suffixes: ``input_ids``, ``labels``,
            ``attention_mask``, ``position_ids``; the exact keys ``images``,
            ``videos`` and ``audios`` are passed through untouched.
        tokenizer: object exposing ``pad_token_id``, used to pad input ids
            (and labels when ``ignore_pad_token_for_loss`` is falsy).
        max_length: target length each row is padded up to. Rows already at or
            beyond this length are left as-is (no truncation is performed).
        ignore_pad_token_for_loss: when truthy, labels are padded with
            ``IGNORE_INDEX`` so the loss skips padding positions.

    Returns:
        The same ``examples`` mapping, padded in place.

    Raises:
        NotImplementedError: on an unrecognized feature key.
    """
    input_pad_token_id = tokenizer.pad_token_id
    label_pad_token_id = IGNORE_INDEX if ignore_pad_token_for_loss else tokenizer.pad_token_id

    for k, v in examples.items():
        if k.endswith("input_ids"):
            pad_token_id = input_pad_token_id
        elif k.endswith("labels"):
            pad_token_id = label_pad_token_id
            # Shift labels left by one so label[t] is the target predicted
            # from input position t (next-token prediction alignment).
            for i in range(len(v)):
                v[i] = v[i][1:]
        elif k.endswith("attention_mask"):
            pad_token_id = 0
        elif k.endswith("position_ids"):
            pad_token_id = max_length - 1  # pad with the maximum position id
        elif k in ("images", "videos", "audios"):
            # Multimodal payloads are not token sequences; skip padding.
            # (Removed a dead `pad_token_id = -1` assignment that preceded
            # this `continue` and was never used.)
            continue
        else:
            raise NotImplementedError(f"Unexpected dataset key: {k}")
        for i in range(len(v)):
            v[i].extend([pad_token_id] * (max_length - len(v[i])))
        examples[k] = v

    return examples


def sp_split(examples, sequence_parallel_size):
    """Scatter each row of every feature across ``sequence_parallel_size`` ranks.

    Attention-mask rows and ``None`` rows are replicated once per rank (every
    rank needs the full mask); all other rows are cut into contiguous chunks
    via ``preprocess_sp_dataset``. Mutates ``examples`` in place and returns it.
    """
    for key, rows in examples.items():
        is_mask = key.endswith("attention_mask")
        scattered = []
        for row in rows:
            if is_mask:
                # Duplicate the full mask rather than slicing it.
                scattered += [row] * sequence_parallel_size
            elif row is None:
                scattered += [None] * sequence_parallel_size
            else:
                scattered += preprocess_sp_dataset(row, sequence_parallel_size)
        examples[key] = scattered
    return examples
