# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import dataclasses
import re
from types import ModuleType

from transformers.models.qwen2 import modeling_qwen2
from transformers.models.llama import modeling_llama
from transformers.models.mistral import modeling_mistral

from openmind.utils import version
from openmind.utils.version import check_package_version
from openmind.integrations.transformers.npu_fused_ops import rms_norm, rope, swiglu, attentions
from openmind.integrations.transformers.npu_fused_ops import dynamic_module_utils


@dataclasses.dataclass
class Pattern:
    """Regex fragments used to locate the patchable attributes of a transformers modeling module."""

    # Matches the model's RMSNorm class name, e.g. "Qwen2RMSNorm".
    rmsnorm: str = "RMSNorm"
    # Matches the rotary position embedding function name.
    rope: str = "apply_rotary_pos_emb"
    # Matches the MLP class name (the feed-forward block to be replaced by fused SwiGLU).
    swiglu: str = "MLP"


def _patch_sdpa_forward():
    """
    Redirect the native SDPA forward of transformers to the NPU-adapted SDPA implementation.

    Without this patch, selecting the SDPA attention interface still executes in eager mode
    on NPU. Only applied for the transformers versions this adaptation was validated on.
    """
    if not check_package_version("transformers>=4.51.1, <=4.51.3"):
        # Unsupported transformers version: leave the native SDPA untouched.
        return

    from transformers.integrations import sdpa_attention
    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, AttentionInterface

    npu_sdpa_forward = attentions.sdpa_attention.sdpa_attention_forward
    # Replace every registry that transformers resolves "sdpa" through.
    sdpa_attention.sdpa_attention_forward = npu_sdpa_forward
    AttentionInterface._global_mapping["sdpa"] = npu_sdpa_forward
    ALL_ATTENTION_FUNCTIONS["sdpa"] = npu_sdpa_forward


def _builtin_patch_flash_attention(config=None):
    """
    Patch fused attention for transformers built-in models.

    Call this before model instantiation completes; once the model has been
    instantiated the patch has no effect.

    Args:
        config: Optional model config. When given, its ``_attn_implementation``
            is forced to "sdpa" so the (patched) SDPA path is selected.
    """
    _patch_sdpa_forward()
    if config is None:
        return
    config._attn_implementation = "sdpa"


def _builtin_patch_rmsnorm(module: ModuleType, class_name: str):
    """
    Patch the RMSNorm for transformers built-in models. Call this method before the model
    instantiation is completed; when the model has already been instantiated, this method
    is not effective.

    Args:
        module: The transformers modeling module whose RMSNorm class is replaced.
        class_name: The attribute name of the RMSNorm class inside ``module``.

    Usage example:
        _builtin_patch_rmsnorm(transformers.models.qwen2.modeling_qwen2, "Qwen2RMSNorm")
    """
    setattr(module, class_name, rms_norm.rms_norm.NpuRMSNorm)


def _builtin_patch_rope(module: ModuleType, func_name: str):
    """Replace ``module.<func_name>`` with the NPU fused rotary position embedding function."""
    fused_rope = rope.rope.apply_rotary_pos_emb
    setattr(module, func_name, fused_rope)


def _builtin_patch_swiglu(module: ModuleType, class_name: str):
    """Replace ``module.<class_name>`` with the NPU fused SwiGlu implementation."""
    fused_swiglu = swiglu.swiglu.NpuSwiGlu
    setattr(module, class_name, fused_swiglu)


def _apply_fused_kernel_base(module: ModuleType, **kwargs):
    config = kwargs.get("config", None)
    if kwargs.get("use_npu_fusion_attention", False):
        _builtin_patch_flash_attention(config)
    else:
        # If the FA fused option is not open, enforce eager mode.
        # Note: if the config is None, the default value of `_attn_implementation` is `sdpa`, but because of the npu sdpa
        # implementation, it will also run as eager mode, furthermore, the performance of this case is worse than transformers
        # native implementation of eager mode.
        if config is not None:
            setattr(config, "_attn_implementation", "eager")

    if kwargs.get("use_fused_rms_norm", False):
        pattern = re.compile(Pattern.rmsnorm)
        norm_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_rmsnorm(module, norm_attr[0])

    if kwargs.get("use_fused_rope", False):
        pattern = re.compile(Pattern.rope)
        rope_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_rope(module, rope_attr[0])

    if kwargs.get("use_fused_swiglu", False):
        pattern = re.compile(Pattern.swiglu)
        swiglu_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_swiglu(module, swiglu_attr[0])


def apply_fused_kernel_qwen2(**kwargs):
    """Apply the requested NPU fused kernels to the transformers built-in Qwen2 modeling module.

    All keyword arguments (config, use_npu_fusion_attention, use_fused_rms_norm,
    use_fused_rope, use_fused_swiglu) are forwarded to ``_apply_fused_kernel_base``.
    """
    _apply_fused_kernel_base(modeling_qwen2, **kwargs)


def apply_fused_kernel_qwen3(**kwargs):
    """Apply the requested NPU fused kernels to the transformers built-in Qwen3 modeling module.

    Only performed on the validated transformers versions; otherwise this is a no-op.
    """
    # Temporary modification. Clean up related judgments later.
    if not version.check_package_version("transformers>=4.51.1, <=4.51.3"):
        return

    from transformers.models.qwen3 import modeling_qwen3

    _apply_fused_kernel_base(modeling_qwen3, **kwargs)


def apply_fused_kernel_llama(**kwargs):
    """Apply the requested NPU fused kernels to the transformers built-in Llama modeling module.

    All keyword arguments (config, use_npu_fusion_attention, use_fused_rms_norm,
    use_fused_rope, use_fused_swiglu) are forwarded to ``_apply_fused_kernel_base``.
    """
    _apply_fused_kernel_base(modeling_llama, **kwargs)


def apply_fused_kernel_mistral(**kwargs):
    """Apply the requested NPU fused kernels to the transformers built-in Mistral modeling module.

    All keyword arguments (config, use_npu_fusion_attention, use_fused_rms_norm,
    use_fused_rope, use_fused_swiglu) are forwarded to ``_apply_fused_kernel_base``.
    """
    _apply_fused_kernel_base(modeling_mistral, **kwargs)


def apply_fused_kernel_internlm2(**kwargs):
    """Register and patch the NPU fused ops for the remote-code InternLM2 model.

    Registers InternLM2ForCausalLM in the dynamic-model registry on first use,
    then enables the fused ops selected through the keyword flags.
    """
    if "InternLM2ForCausalLM" not in dynamic_module_utils.DYNAMIC_MODELS:
        dynamic_module_utils.register_dynamic_model(
            "InternLM2ForCausalLM",
            rms_norm=rms_norm.rms_norm.NpuRMSNorm,
            rope=rope.rope.apply_rotary_pos_emb,
            # InternLM2 uses its own SwiGlu variant.
            swiglu=swiglu.swiglu.NpuIntern2SwiGlu,
        )
    dynamic_module_utils.patch_dynamic_fused_ops(
        use_npu_fusion_attention=kwargs.get("use_npu_fusion_attention", False),
        use_fused_rms_norm=kwargs.get("use_fused_rms_norm", False),
        use_fused_rope=kwargs.get("use_fused_rope", False),
        use_fused_swiglu=kwargs.get("use_fused_swiglu", False),
        config=kwargs.get("config", None),
    )


def apply_fused_kernel_internlm3(**kwargs):
    """Register and patch the NPU fused ops for the remote-code InternLM3 model.

    Registers InternLM3ForCausalLM in the dynamic-model registry on first use,
    then enables the fused ops selected through the keyword flags.
    """
    # Collect the boolean fused-op switches, defaulting each to False.
    fused_flags = {
        name: kwargs.get(name, False)
        for name in (
            "use_npu_fusion_attention",
            "use_fused_rms_norm",
            "use_fused_rope",
            "use_fused_swiglu",
        )
    }
    if "InternLM3ForCausalLM" not in dynamic_module_utils.DYNAMIC_MODELS:
        dynamic_module_utils.register_dynamic_model(
            "InternLM3ForCausalLM",
            rms_norm=rms_norm.rms_norm.NpuRMSNorm,
            rope=rope.rope.apply_rotary_pos_emb,
            swiglu=swiglu.swiglu.NpuSwiGlu,
        )
    dynamic_module_utils.patch_dynamic_fused_ops(config=kwargs.get("config"), **fused_flags)
