# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import dataclasses
import re
from types import ModuleType
from typing import Dict, Type

from transformers.models.qwen2 import modeling_qwen2
from transformers.models.llama import modeling_llama
from transformers.models.mistral import modeling_mistral

from openmind.utils import logging
from openmind.integrations.transformers.npu_fused_ops import attenions, rms_norm, rope, swiglu
from openmind.integrations.transformers.npu_fused_ops import dynamic_module_utils

logger = logging.get_logger()


@dataclasses.dataclass
class Pattern:
    """Regex fragments used to locate patchable attributes of a modeling module.

    Each field holds a pattern that is searched (``re.search``) against the
    attribute names of a ``transformers`` modeling module to find the object
    that should be swapped for its NPU fused counterpart.
    """

    # Mapping of attention implementations, e.g. "<MODEL>_ATTENTION_CLASSES".
    attention: str = "ATTENTION_CLASSES"
    # RMSNorm layer class, e.g. "Qwen2RMSNorm".
    rmsnorm: str = "RMSNorm"
    # Rotary position embedding function.
    rope: str = "apply_rotary_pos_emb"
    # Gated MLP block (SwiGLU activation path).
    swiglu: str = "MLP"


def _builtin_patch_flash_attention(RAW_ATTENTION_CLASSES: Dict, NEW_ATTENTION_CLASS: Type):
    """
    Patch the FA for transformers built-in models, call this method before the model instantiation is completed,
    when the model has already been instantiated, this method is not effective.
    """
    RAW_ATTENTION_CLASSES.update({k: NEW_ATTENTION_CLASS for k in RAW_ATTENTION_CLASSES})


def _builtin_patch_rmsnorm(module: ModuleType, class_name: str):
    """
    Patch the RMSNorm for transformers built-in models, call this method before the model
    instantiation is completed; when the model has already been instantiated, this method
    is not effective.

    Args:
        module: the transformers modeling module to patch in place.
        class_name: name of the RMSNorm class attribute on ``module`` to replace
            with ``rms_norm.rms_norm.NpuRMSNorm``.

    usage example:
        _builtin_patch_rmsnorm(transformers.models.qwen2.modeling_qwen2, "Qwen2RMSNorm")
    """
    setattr(module, class_name, rms_norm.rms_norm.NpuRMSNorm)


def _builtin_patch_rope(module: ModuleType, func_name: str):
    """Replace ``module.<func_name>`` with the NPU fused rotary embedding function."""
    fused_rope = rope.rope.apply_rotary_pos_emb
    setattr(module, func_name, fused_rope)


def _builtin_patch_swiglu(module: ModuleType, class_name: str):
    """Replace ``module.<class_name>`` with the NPU fused SwiGLU MLP class."""
    fused_mlp = swiglu.swiglu.NpuSwiGlu
    setattr(module, class_name, fused_mlp)


def _apply_fused_kernel_base(module: ModuleType, **kwargs):
    if kwargs.get("use_npu_fusion_attention", False):
        attention = kwargs.get("attention")
        pattern = re.compile(Pattern.attention)
        attention_classes_attr = [attr for attr in dir(module) if pattern.search(attr)][0]
        _builtin_patch_flash_attention(getattr(module, attention_classes_attr), attention)
        logger.warning_once("The model will load with npu fusion attention.")

    if kwargs.get("use_fused_rms_norm", False):
        pattern = re.compile(Pattern.rmsnorm)
        norm_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_rmsnorm(module, norm_attr[0])
        logger.warning_once("The model will load with npu fused RMSNorm.")

    if kwargs.get("use_fused_rope", False):
        pattern = re.compile(Pattern.rope)
        rope_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_rope(module, rope_attr[0])
        logger.warning_once("The model will load with npu fused RoPE.")

    if kwargs.get("use_fused_swiglu", False):
        pattern = re.compile(Pattern.swiglu)
        swiglu_attr = [attr for attr in dir(module) if pattern.search(attr)]
        _builtin_patch_swiglu(module, swiglu_attr[0])
        logger.warning_once("The model will load with npu fused SwiGLU.")


def apply_fused_kernel_qwen2(**kwargs):
    """Apply the selected NPU fused kernels to the built-in Qwen2 modeling module."""
    npu_attention = attenions.qwen2.Qwen2NPUAttention
    _apply_fused_kernel_base(modeling_qwen2, attention=npu_attention, **kwargs)


def apply_fused_kernel_llama(**kwargs):
    """Apply the selected NPU fused kernels to the built-in LLaMA modeling module."""
    npu_attention = attenions.llama.LlamaNpuFusionAttention
    _apply_fused_kernel_base(modeling_llama, attention=npu_attention, **kwargs)


def apply_fused_kernel_mistral(**kwargs):
    """Apply the selected NPU fused kernels to the built-in Mistral modeling module."""
    npu_attention = attenions.mistral.MistralNpuFlashAttention
    _apply_fused_kernel_base(modeling_mistral, attention=npu_attention, **kwargs)


def apply_fused_kernel_internlm2(**kwargs):
    """Register InternLM2 as a dynamic model (once) and patch its fused ops.

    InternLM2 ships as trust-remote-code modeling files rather than a
    transformers built-in, so patching goes through ``dynamic_module_utils``
    instead of ``_apply_fused_kernel_base``.
    """
    flag_names = (
        "use_npu_fusion_attention",
        "use_fused_rms_norm",
        "use_fused_rope",
        "use_fused_swiglu",
    )
    # Missing flags default to False, i.e. the corresponding op is untouched.
    flags = {name: kwargs.get(name, False) for name in flag_names}
    if "InternLM2ForCausalLM" not in dynamic_module_utils.DYNAMIC_MODELS:
        dynamic_module_utils.register_dynamic_model(
            "InternLM2ForCausalLM",
            npu_fusion_attention=attenions.internlm2,
            rms_norm=rms_norm.rms_norm.NpuRMSNorm,
            rope=rope.rope.apply_rotary_pos_emb,
            swiglu=swiglu.swiglu.NpuIntern2SwiGlu,
        )
    dynamic_module_utils.patch_dynamic_fused_ops(**flags)


def apply_fused_kernel_internlm3(**kwargs):
    """Register InternLM3 as a dynamic model (once) and patch its fused ops.

    Mirrors the InternLM2 flow but uses the InternLM3 attention module and the
    standard ``NpuSwiGlu`` MLP replacement.
    """
    flag_names = (
        "use_npu_fusion_attention",
        "use_fused_rms_norm",
        "use_fused_rope",
        "use_fused_swiglu",
    )
    # Missing flags default to False, i.e. the corresponding op is untouched.
    flags = {name: kwargs.get(name, False) for name in flag_names}
    if "InternLM3ForCausalLM" not in dynamic_module_utils.DYNAMIC_MODELS:
        dynamic_module_utils.register_dynamic_model(
            "InternLM3ForCausalLM",
            npu_fusion_attention=attenions.internlm3,
            rms_norm=rms_norm.rms_norm.NpuRMSNorm,
            rope=rope.rope.apply_rotary_pos_emb,
            swiglu=swiglu.swiglu.NpuSwiGlu,
        )
    dynamic_module_utils.patch_dynamic_fused_ops(**flags)
