# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import typing

from openmind.integrations.transformers.npu_fused_ops import kernel
from openmind.utils import is_torch_npu_available

# Global master switch for NPU fused operators: when False, every fusion flag
# resolved by _parse_params is forced off regardless of caller arguments.
_USE_FUSED_KERNEL = True


def _parse_params(**kwargs):
    """Resolve the fused-operator switches from caller keyword arguments.

    Every supported switch defaults to the module-level master switch.
    A caller-provided value (when not ``None``) is honored only if the master
    switch is on and a torch NPU device is available; unknown keyword
    arguments are ignored.

    Returns:
        dict: mapping of switch name to its resolved boolean-ish value.
    """
    enabled = _USE_FUSED_KERNEL
    resolved = {
        "use_npu_fusion_attention": enabled,
        "use_fused_rms_norm": enabled,
    }
    # Only recognized, non-None overrides take effect; each is gated on the
    # master switch and actual NPU availability.
    overrides = {
        name: value and _USE_FUSED_KERNEL and is_torch_npu_available()
        for name, value in kwargs.items()
        if name in resolved and value is not None
    }
    resolved.update(overrides)
    return resolved


def _apply_fused_kernel_generic(apply_func: typing.Callable, **kwargs):
    """Resolve fusion switches from ``kwargs`` and delegate to ``apply_func``."""
    apply_func(**_parse_params(**kwargs))


def apply_fused_kernel(**kwargs):
    """
    Apply npu fused operator for all support models and fused operators, when call this function, all supported
    fusion operators and models will be enabled by default. You can set the following parameters to disable the
    specified fusion operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    # Apply to every supported model family, in the original registration order.
    for model_apply_func in (
        kernel.apply_fused_kernel_qwen2,
        kernel.apply_fused_kernel_llama,
        kernel.apply_fused_kernel_mistral,
        kernel.apply_fused_kernel_internlm2,
        kernel.apply_fused_kernel_internlm3,
    ):
        _apply_fused_kernel_generic(model_apply_func, **kwargs)


def apply_fused_kernel_to_qwen2(**kwargs):
    """
    Apply npu fused operators for Qwen2 series models. All supported fusion operators are enabled
    by default; pass the following keyword arguments to disable a specific fused operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    _apply_fused_kernel_generic(apply_func=kernel.apply_fused_kernel_qwen2, **kwargs)


def apply_fused_kernel_to_internlm2(**kwargs):
    """
    Apply npu fused operators for Internlm2 series models. All supported fusion operators are enabled
    by default; pass the following keyword arguments to disable a specific fused operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    _apply_fused_kernel_generic(apply_func=kernel.apply_fused_kernel_internlm2, **kwargs)


def apply_fused_kernel_to_internlm3(**kwargs):
    """
    Apply npu fused operators for Internlm3 series models, when call this function, all supported
    fusion operators will be enabled by default. You can set the following parameters to disable the
    specified fused operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    # NOTE: docstring previously said "Internlm2" — copy-paste error; this wrapper
    # applies the InternLM3 fused kernels.
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_internlm3, **kwargs)


def apply_fused_kernel_to_llama(**kwargs):
    """
    Apply npu fused operators for Llama series models. All supported fusion operators are enabled
    by default; pass the following keyword arguments to disable a specific fused operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    _apply_fused_kernel_generic(apply_func=kernel.apply_fused_kernel_llama, **kwargs)


def apply_fused_kernel_to_mistral(**kwargs):
    """
    Apply npu fused operators for Mistral series models. All supported fusion operators are enabled
    by default; pass the following keyword arguments to disable a specific fused operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
    """
    _apply_fused_kernel_generic(apply_func=kernel.apply_fused_kernel_mistral, **kwargs)
