# Copyright (c) 2025 Huawei Technologies Co., Ltd.
#
# openMind is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import typing
from functools import lru_cache

import transformers

from openmind import is_torch_available
from openmind.integrations.transformers.npu_fused_ops import kernel
from openmind.utils import is_torch_npu_available, logging, version
from openmind.flow import arguments

if is_torch_available():
    import torch
    from accelerate import PartialState

logger = logging.get_logger()


@lru_cache
def check_use_fused_kernel(inner=False) -> bool:
    """
    Check whether the current environment and configuration allow NPU fused operators.

    Args:
        inner: True when this method is called internally within openmind-cli, in which
            case the openmind CLI arguments are also verified. False when an outside user
            calls `apply_fused_kernel()` directly, in which case the CLI arguments are
            not verified.

    Returns:
        True if the environment and the parameters allow fused options, False otherwise.
    """
    if not is_torch_available():
        return False

    state = PartialState()
    device_module = getattr(torch, state.device.type.lower(), None)

    # Not running in an NPU environment.
    if "npu" not in str(device_module):
        return False

    # torch_npu is not available.
    if not is_torch_npu_available():
        return False

    # The npu fused option switch in the CLI arguments is closed.
    if inner:
        try:
            args = arguments.get_args()
        except ValueError:
            # Arguments have not been initialized yet; initialize lazily and retry.
            arguments.initialize_openmind()
            args = arguments.get_args()
        if args.disable_fused_options:
            return False

    # Only specific transformers/torch version combinations are compatible
    # with the npu fused options.
    if version.check_package_version("transformers>=4.51.1, <=4.51.3") and (
        version.check_package_version("torch>=2.1.0, <2.1.1") or version.check_package_version("torch>=2.6.0, <2.6.1")
    ):
        return True

    logger.warning_rank0(
        f"RuntimeWarning: The npu fused options are not available under transformers v{transformers.__version__}. "
        f"To use npu fused options, you need torch == 2.1.0 or 2.6.0, and transformers >= 4.51.1, <= 4.51.3. "
        f"In other cases, the npu fused options will not be available."
    )
    return False


@lru_cache
def _parse_params(**kwargs):
    """
    Resolve the four fused-operator switches from keyword overrides.

    Each switch defaults to the result of `check_use_fused_kernel`. An explicit
    (non-None) override is honored only when fused kernels are available;
    otherwise the switch is forced to False.
    """
    enabled = check_use_fused_kernel(inner=kwargs.get("inner", False))
    switches = (
        "use_npu_fusion_attention",
        "use_fused_rms_norm",
        "use_fused_rope",
        "use_fused_swiglu",
    )
    resolved = dict.fromkeys(switches, enabled)
    for name in switches:
        override = kwargs.get(name)
        if override is not None:
            resolved[name] = override if enabled else False
    return resolved


def _apply_log(model_type: str = None, **kwargs):
    """Emit an info line for every fused operator that will be applied to the model."""
    label = model_type or "supported"
    kwargs.pop("config", None)
    resolved = _parse_params(**kwargs)
    operator_names = {
        "use_npu_fusion_attention": "npu fused attention",
        "use_fused_rms_norm": "npu fused RMSNorm",
        "use_fused_rope": "npu fused RoPE",
        "use_fused_swiglu": "npu fused SwiGLU",
    }
    for switch, op_name in operator_names.items():
        if resolved.get(switch, False):
            logger.info_rank0(f"The {label} model will load with {op_name}.")


def _apply_fused_kernel_generic(apply_func: typing.Callable, **kwargs):
    """Invoke a model-specific `apply_func` with the resolved fused-operator switches."""
    config = kwargs.pop("config", None)
    apply_func(config=config, **_parse_params(**kwargs))


def apply_fused_kernel(**kwargs):
    """
    Apply npu fused operators for all supported models. When this function is called,
    every supported fusion operator is enabled by default for every supported model
    family. Pass any of the following parameters as `False` to disable the
    corresponding fusion operator:
         `use_npu_fusion_attention: bool = False`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool = False`, default is True, set it to `False` to disable npu RMSNorm.
         `use_fused_rope: bool = False`, default is True, set it to `False` to disable npu apply_rotary_pos_emb.
         `use_fused_swiglu: bool = False`, default is True, set it to `False` to disable npu swiglu.
    """
    appliers = (
        kernel.apply_fused_kernel_qwen2,
        kernel.apply_fused_kernel_qwen3,
        kernel.apply_fused_kernel_llama,
        kernel.apply_fused_kernel_mistral,
        kernel.apply_fused_kernel_internlm2,
        kernel.apply_fused_kernel_internlm3,
    )
    for applier in appliers:
        _apply_fused_kernel_generic(applier, **kwargs)
    _apply_log(model_type=None, **kwargs)


def apply_fused_kernel_to_qwen2(**kwargs):
    """
    Enable npu fused operators for Qwen2 series models. All supported fusion
    operators are switched on by default; pass any of the parameters below as
    `False` to turn a specific fused operator off:
         `use_npu_fusion_attention: bool`, default is True, set it to `False` to disable npu fusion attention.
         `use_fused_rms_norm: bool`, default is True, set it to `False` to disable npu RMSNorm.
         `use_fused_rope: bool`, default is True, set it to `False` to disable npu fused RoPE.
         `use_fused_swiglu: bool`, default is True, set it to `False` to disable npu fused SwiGLU.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_qwen2, **kwargs)
    _apply_log(model_type="qwen2", **kwargs)


def apply_fused_kernel_to_qwen3(**kwargs):
    """
    Enable npu fused operators for Qwen3 series models. All supported fusion
    operators are switched on by default; keyword flags can disable them.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_qwen3, **kwargs)
    _apply_log(model_type="qwen3", **kwargs)


def apply_fused_kernel_to_internlm2(**kwargs):
    """
    Enable npu fused operators for InternLM2 series models. All supported fusion
    operators are switched on by default; keyword flags can disable them.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_internlm2, **kwargs)
    _apply_log(model_type="internlm2", **kwargs)


def apply_fused_kernel_to_internlm3(**kwargs):
    """
    Apply npu fused operators for Internlm3 series models, when call this function, all supported
    fusion operators will be enabled by default.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_internlm3, **kwargs)
    _apply_log(model_type="internlm3", **kwargs)


def apply_fused_kernel_to_llama(**kwargs):
    """
    Enable npu fused operators for Llama series models. All supported fusion
    operators are switched on by default; keyword flags can disable them.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_llama, **kwargs)
    _apply_log(model_type="llama", **kwargs)


def apply_fused_kernel_to_mistral(**kwargs):
    """
    Enable npu fused operators for Mistral series models. All supported fusion
    operators are switched on by default; keyword flags can disable them.
    """
    _apply_fused_kernel_generic(kernel.apply_fused_kernel_mistral, **kwargs)
    _apply_log(model_type="mistral", **kwargs)


# Maps a model's `architectures` entry (as found in its HF config) to the
# function that patches that model family with npu fused operators.
SUPPORTED_FUSED_MODELS = {
    "Qwen2ForCausalLM": apply_fused_kernel_to_qwen2,
    "Qwen3ForCausalLM": apply_fused_kernel_to_qwen3,
    "LlamaForCausalLM": apply_fused_kernel_to_llama,
    "MistralForCausalLM": apply_fused_kernel_to_mistral,
    "InternLM2ForCausalLM": apply_fused_kernel_to_internlm2,
    "InternLM3ForCausalLM": apply_fused_kernel_to_internlm3,
}


def map_fused_kernel_to_model(architectures, **kwargs):
    """
    Look up the fused-kernel patcher for the first entry of `architectures`
    and apply it; log a warning and return if the architecture is missing
    or unsupported.
    """
    if not architectures:
        logger.warning_rank0("Unknown model architectures for npu fused options")
        return

    arch = architectures[0]
    applier = SUPPORTED_FUSED_MODELS.get(arch)
    if applier is None:
        logger.warning_rank0(f"Unsupported model architecture for npu fused options: {arch}")
        return

    applier(inner=True, **kwargs)
