# SPDX-License-Identifier: MIT
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.

"""Custom normalization layers."""
import torch
import torch_npu
from typing import Optional, Union, Any
from vllm.model_executor.layers.layernorm import RMSNorm as RMSNormGPU
from vllm.distributed import get_tp_group
from vllm.distributed.parallel_state import get_tensor_model_parallel_world_size, get_tensor_model_parallel_rank

from omni.models.common.config.model_config import model_extra_config


class RMSNorm(RMSNormGPU):
    """RMS normalization layer for Ascend NPU devices.

    Extends the stock vLLM RMSNorm with three optional behaviors:
      - fused add + RMSNorm via ``npu_add_rms_norm`` when a residual is given
      - dynamic per-token INT8 quantization of the normalized output
      - an all_gather over the tensor-parallel group (``y_transform == "AG"``)
    """

    def __init__(
        self,
        hidden_size: int,
        eps: float = 1e-6,
        var_hidden_size: Optional[int] = None,
        module_name: Optional[str] = "",
    ) -> None:
        """Initialize the layer and cache tensor-parallel topology info.

        Args:
            hidden_size: Size of the normalized dimension.
            eps: Numerical-stability epsilon added to the variance.
            var_hidden_size: Optional alternate hidden size for variance
                computation (forwarded to the base class).
            module_name: Free-form label identifying where this norm sits.
        """
        super().__init__(hidden_size, eps, var_hidden_size)
        self.module_name = module_name
        # Tensor-parallel topology, cached once at construction time.
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()

    def forward(
        self,
        x: torch.Tensor,
        residual: Optional[torch.Tensor] = None,
        quant_symbol: bool = False,
        y_transform: str = "",
        always_with_residual: bool = False,
    ) -> Union[tuple[dict[str, Any], Any], Any]:
        """Apply RMSNorm with optional residual fusion, quantization, gather.

        Args:
            x: Input hidden states.
            residual: If provided, fused add + RMSNorm is used and the
                updated residual is returned alongside the output.
            quant_symbol: When True (residual path only), dynamically
                quantize the output and return a dict of int8 values + scales.
            y_transform: "AG" requests an all_gather of the output over the
                tensor-parallel group; any other value is a no-op.
            always_with_residual: In the no-residual path, also return the
                (pre-norm) input as a residual, mirroring the fused path.

        Returns:
            Either the normalized output alone, or an ``(output, residual)``
            pair; ``output`` is a dict when ``quant_symbol`` applies.
        """
        # --- Plain path: no incoming residual -------------------------------
        if residual is None:
            # Keep the pre-norm input around when the caller wants a
            # residual back even without one being passed in.
            saved = x if always_with_residual else None
            out = torch_npu.npu_rms_norm(
                x,
                self.weight.data,
                self.variance_epsilon,
            )[0]
            if saved is None:
                return out
            if y_transform == "AG":
                out = get_tp_group().all_gather(out, dim=0)
            return out, saved

        # --- Fused path: add + RMSNorm in one NPU kernel --------------------
        out, _, residual = torch_npu.npu_add_rms_norm(
            x, residual, self.weight, self.variance_epsilon
        )

        if quant_symbol:
            x_int8, pertoken_scale = torch_npu.npu_dynamic_quant(out)
            out = {"x_int8": x_int8, "pertoken_scale": pertoken_scale}

        # NOTE(review): when quant_symbol is also set, `out` is a dict here —
        # presumably all_gather is never requested together with quantization;
        # confirm against callers.
        if y_transform == "AG":
            out = get_tp_group().all_gather(out, dim=0)

        return out, residual