# Copyright 2021 The HuggingFace Inc. team.
# 2025.01.14 - Adapt to openmind.
#              Huawei Technologies Co., Ltd.
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import os
import re
import sys
import threading
import typing
import importlib
from typing import Optional, Union, List
from types import ModuleType
from pathlib import Path
import hashlib
import functools

import torch
import transformers
from transformers.dynamic_module_utils import get_relative_import_files
from transformers.utils.hub import HF_MODULES_CACHE
from transformers import PretrainedConfig

from openmind.utils import logging
from openmind.integrations.transformers.npu_fused_ops.attenions import internlm2
from openmind.integrations.transformers.npu_fused_ops.rms_norm import rms_norm
from openmind.integrations.transformers.npu_fused_ops.rope import rope
from openmind.integrations.transformers.npu_fused_ops.swiglu import swiglu

logger = logging.get_logger()

# Serializes loading/execution of remote-code modules; plays the same role as
# the lock used inside transformers.dynamic_module_utils.
_HF_REMOTE_CODE_LOCK = threading.Lock()

# Registry: autoclass name -> kwargs recorded by register_dynamic_model
# (fused-op implementations plus the "model_name" key itself).
DYNAMIC_MODELS = {}


@dataclasses.dataclass
class Pattern:
    """Regex fragments used to locate patch targets in a remote-code module.

    Accessed as class-level attributes (e.g. ``Pattern.attention``) and matched
    against the names returned by ``dir(module)`` via ``re.search``.
    """

    # Module-level mapping of attention implementations (e.g. *_ATTENTION_CLASSES).
    attention: str = "ATTENTION_CLASSES"
    # RMS normalization layer class.
    rmsnorm: str = "RMSNorm"
    # Rotary position embedding function.
    rope: str = "apply_rotary_pos_emb"
    # Feed-forward (SwiGLU) block class.
    swiglu: str = "MLP"


def register_dynamic_model(model_name: str, /, **kwargs):
    """Record the NPU fused operators available for one remote-code model.

    Args:
        model_name: Autoclass name, such as InternLM2ForCausalLM.
        **kwargs: supported npu fused options, all kwargs can be None.
        kwargs include the follow params:
            npu_fusion_attention: the adapter module of npu fused attention.
            rms_norm: the class of npu fused rms norm.
            rope:  the function of npu fused rotary position embedding
            swiglu: the class of npu fused SwiGLU.

    """
    entry = dict(kwargs)
    entry["model_name"] = model_name
    DYNAMIC_MODELS[model_name] = entry


# Built-in registration: InternLM2 remote code gets the full set of fused ops
# (attention adapter module, RMSNorm class, RoPE function, SwiGLU class).
register_dynamic_model(
    "InternLM2ForCausalLM",
    npu_fusion_attention=internlm2,
    rms_norm=rms_norm.NpuRMSNorm,
    rope=rope.apply_rotary_pos_emb,
    swiglu=swiglu.NpuIntern2SwiGlu,
)


def _raw_get_dynamic_module(
    class_name: str,
    module_path: Union[str, os.PathLike],
    *,
    force_reload: bool = False,
):
    """
    Load (or reuse) a remote-code python module stored under HF_MODULES_CACHE.

    Copied from transformers.dynamic_module_utils.get_class_in_module, except
    that it returns the module object instead of one class from it.

    Args:
        class_name: Unused here; kept so the signature mirrors the upstream
            helper this function was copied from.
        module_path: Path of the ``.py`` file, relative to HF_MODULES_CACHE.
        force_reload: When True, drop any cached module and re-execute the file.

    Returns:
        The loaded ``ModuleType``, also registered in ``sys.modules``.
    """

    # Derive the dotted module name from the relative path ("a/b/c.py" -> "a.b.c").
    name = os.path.normpath(module_path)
    if name.endswith(".py"):
        name = name[:-3]
    name = name.replace(os.path.sep, ".")
    module_file: Path = Path(HF_MODULES_CACHE) / module_path
    with _HF_REMOTE_CODE_LOCK:
        if force_reload:
            sys.modules.pop(name, None)
            importlib.invalidate_caches()
        cached_module: Optional[ModuleType] = sys.modules.get(name)
        module_spec = importlib.util.spec_from_file_location(name, location=module_file)

        # Hash the module file and all its relative imports to check if we need to reload it
        module_files: List[Path] = [module_file] + sorted(map(Path, get_relative_import_files(module_file)))
        module_hash: str = hashlib.sha256(b"".join(bytes(f) + f.read_bytes() for f in module_files)).hexdigest()

        module: ModuleType
        if cached_module is None:
            module = importlib.util.module_from_spec(module_spec)
            # insert it into sys.modules before any loading begins
            sys.modules[name] = module
        else:
            module = cached_module
        # Re-execute only when the content hash differs from the last load.
        if getattr(module, "__transformers_module_hash__", "") != module_hash:
            module_spec.loader.exec_module(module)
            module.__transformers_module_hash__ = module_hash
    return module


def _dynamic_patch_flash_attention(model_name: str, module: ModuleType, **kwargs):
    if model_name not in DYNAMIC_MODELS:
        return
    if torch.__version__ == "2.1.0":
        pattern = re.compile(Pattern.attention)
        attention_classes_attr = [attr for attr in dir(module) if pattern.search(attr)]
        attention_classes = getattr(module, attention_classes_attr[0])
        if DYNAMIC_MODELS[model_name].get("npu_fusion_attention"):
            npu_attention_class = type(
                "NPUFusionAttention",
                (attention_classes["eager"],),
                {"forward": DYNAMIC_MODELS[model_name].get("npu_fusion_attention").forward},
            )
            attention_classes.update({k: npu_attention_class for k in attention_classes})
    elif torch.__version__ >= "2.6.0":
        config = kwargs.get("config")
        setattr(config, "_attn_implementation", "sdpa")
    else:
        config = kwargs.get("config")
        setattr(config, "_attn_implementation", "eager")


def _dynamic_patch_rms_norm(model_name: str, module: ModuleType):
    """Replace the remote module's RMSNorm class with the registered fused one.

    Args:
        model_name: Autoclass name used as key into DYNAMIC_MODELS.
        module: The dynamically loaded remote-code module to patch.
    """
    if model_name not in DYNAMIC_MODELS:
        return
    fused_norm = DYNAMIC_MODELS[model_name].get("rms_norm")
    if not fused_norm:
        return
    pattern = re.compile(Pattern.rmsnorm)
    norm_attrs = [attr for attr in dir(module) if pattern.search(attr)]
    # Guard against an unexpected module layout: previously an empty match list
    # raised IndexError when a fused rms_norm was registered.
    if norm_attrs:
        setattr(module, norm_attrs[0], fused_norm)


def _dynamic_patch_rope(model_name, module):
    """Swap the remote module's rotary-embedding function for the fused version.

    Also rebinds the same attribute on the registered fused-attention adapter
    module (when present) so its forward uses the fused RoPE as well.

    Args:
        model_name: Autoclass name used as key into DYNAMIC_MODELS.
        module: The dynamically loaded remote-code module to patch.
    """
    if model_name not in DYNAMIC_MODELS:
        return
    entry = DYNAMIC_MODELS[model_name]
    fused_rope = entry.get("rope")
    if not fused_rope:
        return
    pattern = re.compile(Pattern.rope)
    rope_attrs = [attr for attr in dir(module) if pattern.search(attr)]
    # Guard against an unexpected module layout: previously an empty match list
    # raised IndexError when a fused rope was registered.
    if not rope_attrs:
        return
    setattr(module, rope_attrs[0], fused_rope)
    fused_attention = entry.get("npu_fusion_attention")
    if fused_attention:
        setattr(fused_attention, rope_attrs[0], fused_rope)


def _dynamic_patch_swiglu(model_name, module):
    """Replace the remote module's MLP (SwiGLU) class with the fused version.

    Args:
        model_name: Autoclass name used as key into DYNAMIC_MODELS.
        module: The dynamically loaded remote-code module to patch.
    """
    # Registration guard, consistent with the sibling patch helpers: without it
    # any unregistered remote model raised KeyError on the lookup below.
    if model_name not in DYNAMIC_MODELS:
        return
    fused_swiglu = DYNAMIC_MODELS[model_name].get("swiglu")
    if not fused_swiglu:
        return
    pattern = re.compile(Pattern.swiglu)
    swiglu_attrs = [attr for attr in dir(module) if pattern.search(attr)]
    # Guard against an unexpected module layout (no attribute matching "MLP").
    if swiglu_attrs:
        setattr(module, swiglu_attrs[0], fused_swiglu)


def dynamic_operator_decorator(operator: typing.Callable, enable: bool = True, **kwargs):
    """Build a decorator that runs ``operator`` on every module a loader returns.

    Args:
        operator: Callback invoked as ``operator(class_name, module, **kwargs)``
            right after the wrapped loader produces ``module``.
        enable: When False the decorator is a no-op and returns the loader
            unchanged.
        **kwargs: Extra keyword arguments forwarded to ``operator``.

    Returns:
        A decorator for loader functions with the signature
        ``loader(class_name, module_path, *, force_reload=False)``.
    """

    def decorator(loader):
        # Disabled: hand the loader back untouched.
        if not enable:
            return loader

        @functools.wraps(loader)
        def wrapper(class_name: str, module_path: Union[str, os.PathLike], *, force_reload: bool = False):
            loaded = loader(class_name, module_path, force_reload=force_reload)
            operator(class_name, loaded, **kwargs)
            return loaded

        return wrapper

    return decorator


def patch_dynamic_fused_ops(
    use_npu_fusion_attention: bool = True,
    use_fused_rms_norm: bool = True,
    use_fused_rope: bool = True,
    use_fused_swiglu: bool = True,
    config: Optional[PretrainedConfig] = None,
):
    """
    Monkey-patch ``transformers.dynamic_module_utils.get_class_in_module`` so
    that every trust-remote-code module gets the registered NPU fused operators
    applied right after it is loaded.

    Args:
        use_npu_fusion_attention: Enable the fused attention patch.
        use_fused_rms_norm: Enable the fused RMSNorm patch.
        use_fused_rope: Enable the fused rotary-position-embedding patch.
        use_fused_swiglu: Enable the fused SwiGLU patch.
        config: Model config forwarded to the attention patch, which may switch
            its ``_attn_implementation`` depending on the torch version.
    """

    # Decorators apply bottom-up, so after each module load the enabled patch
    # operators run in order: swiglu, rope, rms_norm, flash_attention.
    @dynamic_operator_decorator(operator=_dynamic_patch_flash_attention, enable=use_npu_fusion_attention, config=config)
    @dynamic_operator_decorator(operator=_dynamic_patch_rms_norm, enable=use_fused_rms_norm)
    @dynamic_operator_decorator(operator=_dynamic_patch_rope, enable=use_fused_rope)
    @dynamic_operator_decorator(operator=_dynamic_patch_swiglu, enable=use_fused_swiglu)
    def _get_dynamic_module(
        class_name: str,
        module_path: Union[str, os.PathLike],
        *,
        force_reload: bool = False,
    ):
        # Raw loader; the stacked decorators above patch the returned module.
        return _raw_get_dynamic_module(class_name, module_path, force_reload=force_reload)

    def _get_class_in_module(
        class_name: str,
        module_path: Union[str, os.PathLike],
        *,
        force_reload: bool = False,
    ) -> typing.Type:
        # Same signature/contract as the upstream helper it replaces: load the
        # (now patched) module and pull the requested class out of it.
        module = _get_dynamic_module(class_name=class_name, module_path=module_path, force_reload=force_reload)
        return getattr(module, class_name)

    transformers.dynamic_module_utils.get_class_in_module = _get_class_in_module
