# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.

from functools import partial

import torch
import torch.distributed as dist

from openmind.flow.model.sequence_parallel.ulysses import UlyssesAttention
from openmind.integrations.transformers.npu_fused_ops.sdk import SUPPORTED_FUSED_MODELS
from openmind.utils import logging

_SEQUENCE_PARALLEL_GROUP = None
logger = logging.get_logger(__name__)


class DistributedTrainingModule:
    """Namespace wrapping sequence-parallel process-group management.

    Groups the initialization routine and the query helpers behind a single
    entry point; the getters delegate to the module-level functions below.
    """

    @staticmethod
    def initialize_sequence_parallel(
        world_size: int,
        rank: int,
        sequence_parallel_size: int = 1,
    ):
        """Create the sequence parallel process groups and cache this rank's group.

        Partitions ``world_size`` ranks into contiguous groups of
        ``sequence_parallel_size`` ranks each and stores the group containing
        ``rank`` in the module-level ``_SEQUENCE_PARALLEL_GROUP``.

        Args:
            world_size: Total number of ranks in the job.
            rank: This process's global rank.
            sequence_parallel_size: Ranks per sequence parallel group.

        Raises:
            ValueError: If ``world_size`` is not divisible by
                ``sequence_parallel_size``.
        """
        global _SEQUENCE_PARALLEL_GROUP

        if world_size % sequence_parallel_size:
            raise ValueError(
                f"World size ({world_size}) must be divisible by sequence parallel size ({sequence_parallel_size})."
            )

        num_sequence_parallel_groups = world_size // sequence_parallel_size

        for i in range(num_sequence_parallel_groups):
            ranks = range(i * sequence_parallel_size, (i + 1) * sequence_parallel_size)
            # dist.new_group is a collective: every rank must call it for every
            # group, even groups it does not belong to. Only the group that
            # contains this rank is cached.
            group = dist.new_group(ranks)
            if rank in ranks:
                _SEQUENCE_PARALLEL_GROUP = group

    @staticmethod
    def get_sequence_parallel_world_size():
        """Return the number of ranks in this rank's sequence parallel group."""
        return get_sequence_parallel_world_size()

    @staticmethod
    def get_sequence_parallel_rank():
        """Return this rank's index within its sequence parallel group."""
        return get_sequence_parallel_rank()

    @staticmethod
    def get_sequence_parallel_group():
        """Return the cached sequence parallel process group."""
        return get_sequence_parallel_group()


def get_sequence_parallel_world_size():
    """Return the number of ranks participating in the sequence parallel group."""
    sp_group = get_sequence_parallel_group()
    return dist.get_world_size(group=sp_group)


def get_sequence_parallel_rank():
    """Return this process's rank within its sequence parallel group.

    Fixes the original implementation, which called ``dist.get_world_size``
    (instead of ``dist.get_rank``) and passed ``get_sequence_parallel_rank()``
    — i.e. itself, recursing without bound — instead of the process group.
    """
    return dist.get_rank(group=get_sequence_parallel_group())


def get_sequence_parallel_group():
    """Return the cached sequence parallel process group.

    Raises:
        ValueError: If ``initialize_sequence_parallel`` has not been called.
    """
    group = _SEQUENCE_PARALLEL_GROUP
    if group is None:
        raise ValueError(
            "The sequence parallel group is not initialized. Please call initialize_sequence_parallel first."
        )
    return group


def new_attn_forward(
    query_states,
    key_states,
    value_states,
    group,
    attn_fn,
    dropout_p=0.0,
    scale=None,
    **kwargs,
):
    """Route an attention call through the Ulysses sequence-parallel wrapper.

    ``attn_fn`` is the underlying attention implementation and ``group`` the
    sequence parallel process group. Extra ``kwargs`` are accepted for
    signature compatibility with the patched call site and ignored.
    """
    ulysses_attn = UlyssesAttention(sequence_process_group=group, attn_fn=attn_fn)
    return ulysses_attn(query_states, key_states, value_states, dropout_p, scale)


def apply_sequence_parallel(args, config):
    """Monkey-patch ``scaled_dot_product_attention`` for sequence parallelism.

    No-op when ``args.sequence_parallel_size <= 1``. Otherwise validates the
    configuration and rebinds ``torch.nn.functional.scaled_dot_product_attention``
    to the Ulysses wrapper over this rank's sequence parallel group.

    Raises:
        ValueError: If the attention-head count is not divisible by the
            sequence parallel size, or the model cannot enable the NPU fused
            options required for sequence parallel training.
    """
    # Guard clause: nothing to do without sequence parallelism.
    if args.sequence_parallel_size <= 1:
        return

    if config.num_attention_heads % args.sequence_parallel_size:
        # NOTE: original message had typos ("devisible") and the two literal
        # pieces joined without a separating space; both fixed here.
        raise ValueError(
            "num_attention_head must be divisible by sequence_parallel_size for sequence parallel training. "
            f"{config.num_attention_heads} is not divisible by {args.sequence_parallel_size}"
        )

    if not (config.architectures and config.architectures[0] in SUPPORTED_FUSED_MODELS and args.do_train):
        raise ValueError(
            "Sequence parallel training does not support models that cannot enable npu fused options."
        )

    group_this = DistributedTrainingModule.get_sequence_parallel_group()
    original_attn = torch.nn.functional.scaled_dot_product_attention
    # Bind group and the original SDPA into the wrapper, then swap it in so
    # every attention call in the model runs through the sequence parallel path.
    new_attention_forward = partial(new_attn_forward, group=group_this, attn_fn=original_attn)
    torch.nn.functional.scaled_dot_product_attention = new_attention_forward
    logger.info_rank0("Enable sequence parallel training for the model.")
