# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools

import torch
import torch.distributed as dist
from torch.nn import CrossEntropyLoss
from torch.utils.data import Sampler
from transformers import Seq2SeqTrainer
from transformers.trainer import _is_peft_model
from typing_extensions import override

from openmind.flow.arguments import get_args
from openmind.flow.model.sequence_parallel.seq_utils import DistributedTrainingModule
from openmind.utils.version import is_transformers_version_equal_to_4_46


class SequenceParallelSampler(Sampler[int]):
    """Random sampler for sequence-parallel training.

    The dataset is assumed to store each logical sample as
    ``sequence_parallel_size`` consecutive entries (one sequence slice per
    sequence-parallel rank).  This sampler shuffles at the logical-sample
    level, pads the shuffled list so it divides evenly into global batches,
    and interleaves the raw indices so that all ranks of one
    sequence-parallel group are fed the slices of the same logical samples.

    Args:
        data_source: dataset whose length is ``num_logical_samples *
            sequence_parallel_size`` (slices of a sample are contiguous).
        per_device_bs: per-device train batch size.
        sequence_parallel_size: number of ranks sharing one sequence.
        world_size: total number of ranks in the job.
    """

    def __init__(self, data_source, per_device_bs, sequence_parallel_size, world_size):
        self.sequence_parallel_size = sequence_parallel_size
        self.per_device_bs = per_device_bs
        # Number of logical samples; each one spans `sequence_parallel_size`
        # consecutive raw dataset entries.
        self.num_data = len(data_source) // self.sequence_parallel_size
        # Logical samples consumed per global step across all data-parallel groups.
        self.batch_size = self.per_device_bs * world_size // self.sequence_parallel_size
        # Round num_data UP to the next multiple of batch_size so every global
        # batch is full.  Bug fix: the previous condition used floor division
        # (`num_data // batch_size`), which appended a whole extra batch of
        # duplicates when num_data was already an exact multiple, and skipped
        # padding entirely when num_data < batch_size (leaving a short batch).
        # Padding is needed exactly when a remainder exists.
        self.num_pad_data = (
            ((self.num_data // self.batch_size) + 1) * self.batch_size
            if self.num_data % self.batch_size
            else self.num_data
        )

    def __iter__(self):
        # Fresh random seed per epoch (mirrors torch's RandomSampler behavior).
        seed = int(torch.empty((), dtype=torch.int64).random_().item())
        generator = torch.Generator()
        generator.manual_seed(seed)
        # Shuffle logical samples, then pad by re-using the head of the
        # permutation so len(sample_indices) == num_pad_data.
        sample_indices = torch.randperm(self.num_data, generator=generator).tolist()
        sample_indices = sample_indices + sample_indices[: self.num_pad_data - self.num_data]

        shuffle_indices = []
        group = []
        for idx, sample_idx in enumerate(sample_indices):
            # Expand a logical sample into its `sequence_parallel_size`
            # contiguous raw indices (one slice per sequence-parallel rank).
            group.append([sample_idx * self.sequence_parallel_size + i for i in range(self.sequence_parallel_size)])
            if idx % self.per_device_bs == self.per_device_bs - 1:
                # Transpose so all slice-0 indices come first, then slice-1,
                # etc. — ranks of one sequence-parallel group thus read the
                # matching slices of the same per-device batch of samples.
                group_for_sp = list(itertools.chain(*list(zip(*group))))
                shuffle_indices.append(group_for_sp)
                group = []
        shuffle_indices = list(itertools.chain(*shuffle_indices))
        yield from shuffle_indices

    def __len__(self):
        # Raw-index count: padded logical samples times slices per sample.
        return self.num_pad_data * self.sequence_parallel_size


class CustomSeq2SeqTrainer(Seq2SeqTrainer):
    r"""
    Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE.

    Adds sequence-parallel support: a dedicated train sampler and a loss
    computed as a token-sum all-reduced over the sequence-parallel group,
    normalized by the global number of non-ignored label tokens.
    """

    @override
    def _get_train_sampler(self):
        """Return a SequenceParallelSampler when sequence parallelism is enabled,
        otherwise fall back to the default trainer sampler."""
        args = get_args()

        if args.sequence_parallel_size > 1:
            # Each logical sample spans `sequence_parallel_size` consecutive
            # dataset entries; the custom sampler keeps those slices together
            # within one sequence-parallel group.
            return SequenceParallelSampler(
                self.train_dataset,
                args.per_device_train_batch_size,
                args.sequence_parallel_size,
                dist.get_world_size(),
            )
        else:
            return super()._get_train_sampler()

    @override
    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Compute the training loss.

        Without sequence parallelism this defers entirely to the parent
        trainer.  With sequence parallelism each rank holds only a slice of
        every sequence, so the per-rank token-sum cross-entropy and the
        per-rank label count are both all-reduced across the sequence-parallel
        group before normalizing, yielding the mean loss over all
        non-ignored tokens of the full sequences.

        NOTE(review): when sequence_parallel_size > 1 the parent's outputs
        are discarded and only a scalar loss is returned even if
        return_outputs=True; the 4.46 branch below would also fail on
        `loss[0]` for a scalar tensor — confirm callers never request
        outputs with sequence parallelism enabled.
        """
        args = get_args()

        if args.sequence_parallel_size == 1:
            # NOTE(review): `return_outputs` is passed positionally; in newer
            # transformers the third positional parameter of compute_loss is
            # `num_items_in_batch` — verify against the pinned version.
            loss = super().compute_loss(model, inputs, return_outputs, **kwargs)
        else:
            # Run the forward pass through the parent to get logits; its loss
            # (computed on the local sequence slice only) is discarded.
            _, outputs = super().compute_loss(model, inputs, return_outputs=True, **kwargs)
            # reduction="sum" so the cross-group normalization below divides a
            # global token-sum by a global token count.
            loss_function = CrossEntropyLoss(reduction="sum")
            # HF models return either a dict-like output or a tuple with
            # logits at position 1.
            # NOTE(review): no label shift is applied here — assumes `labels`
            # are already aligned with `logits` by the sequence-parallel data
            # pipeline; confirm upstream.
            logits, labels = outputs["logits"] if isinstance(outputs, dict) else outputs[1], inputs["labels"]

            # PEFT wraps the base model, so the config lives one level deeper.
            unwrapped_model = self.accelerator.unwrap_model(model)
            if _is_peft_model(unwrapped_model):
                vocab_size = unwrapped_model.base_model.model.config.vocab_size
            else:
                vocab_size = unwrapped_model.config.vocab_size

            # Flatten to (tokens, vocab) / (tokens,) for token-level CE.
            logits = logits.view(-1, vocab_size)
            labels = labels.view(-1)

            labels = labels.to(logits.device)
            loss = loss_function(logits, labels)

            # dist.nn.all_reduce (the autograd-aware collective) sums the
            # token-loss and the non-ignored token count over the
            # sequence-parallel group, then the global mean is formed.
            sp_group = DistributedTrainingModule.get_sequence_parallel_group()
            loss = dist.nn.all_reduce(loss, op=dist.ReduceOp.SUM, group=sp_group)
            label_num = (labels != loss_function.ignore_index).sum()
            label_num = dist.nn.all_reduce(label_num, op=dist.ReduceOp.SUM, group=sp_group)
            loss /= label_num

        # Workaround: transformers 4.46 does not divide the loss by
        # gradient_accumulation_steps itself when the model does not accept
        # loss kwargs, so it is applied here.
        if is_transformers_version_equal_to_4_46() and not getattr(self, "model_accepts_loss_kwargs", False):
            if return_outputs:
                # Here `loss` is the (loss, outputs, ...) tuple returned by the
                # parent in the non-sequence-parallel path.
                return (loss[0] / self.args.gradient_accumulation_steps, *loss[1:])
            else:
                return loss / self.args.gradient_accumulation_steps

        return loss
