# =============================================================================
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

from typing import Tuple, Union

import torch
import torch.distributed as dist
import torch.nn.functional as F
import torch_npu
from torch import nn

from utils import ARGS, quant_mapping


class ParallelEmbedding(nn.Module):
    """Embedding layer whose vocabulary is sharded across tensor-parallel ranks.

    Each rank holds a contiguous slice of the vocabulary; tokens outside the
    slice produce zero vectors locally, and the full embedding is recovered
    with an all-reduce over the attention TP group.
    """

    def __init__(self, vocab_size: int, hidden_size: int):
        super().__init__()

        if vocab_size % ARGS.atten_tp_size != 0:
            raise ValueError("`vocab_size` must be divisible by `atten_tp_size`.")

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # This rank owns token ids in [vocab_start_idx, vocab_end_idx).
        self.part_vocab_size = vocab_size // ARGS.atten_tp_size
        self.vocab_start_idx = ARGS.atten_tp_rank * self.part_vocab_size
        self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
        self.weight = nn.Parameter(
            torch.empty(self.part_vocab_size, hidden_size, dtype=ARGS.float_dtype)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Clone: the in-place shift/mask below must not touch the caller's ids.
        token_ids = x.clone()
        if ARGS.atten_tp_size <= 1:
            return F.embedding(token_ids, self.weight)

        # Out-of-slice tokens are remapped to local index 0, looked up, then
        # zeroed; the all-reduce sums in the true rows from the owning rank.
        outside = (token_ids < self.vocab_start_idx) | (token_ids >= self.vocab_end_idx)
        token_ids -= self.vocab_start_idx
        token_ids *= ~outside
        embeddings = F.embedding(token_ids, self.weight)
        embeddings *= ~outside.unsqueeze(-1)
        dist.all_reduce(embeddings, group=ARGS.atten_tp_group)
        return embeddings


class Linear(nn.Module):
    """Linear layer with optional W8A8 quantization for NPU execution.

    Args:
        in_features: Size of each input sample (this rank's shard for row
            parallelism).
        out_features: Size of each output sample (this rank's shard for
            column parallelism).
        bias: Whether to allocate a bias parameter.
        dtype: Optional quantization descriptor string (a key of
            ``quant_mapping``; "W8A8" enables quantization, "DYNAMIC"
            selects per-token quantization). ``None`` means the
            unquantized ``ARGS.model_dtype``.
        split_bias: When True the bias is sized for this rank's output
            partition (``part_out_features`` when a parallel subclass set
            it); when False it spans the full ``out_features``.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = False,
        dtype: str = None,
        split_bias: bool = True,
    ):
        super().__init__()

        self.in_features = in_features
        self.out_features = out_features

        self.quant_enable = False
        # Always defined so forward() can read it even when dtype is None.
        self.dynamic_enable = False
        if dtype is not None:
            self.quant_enable = "W8A8" in dtype
            self.dynamic_enable = "DYNAMIC" in dtype
            dtype = quant_mapping[dtype]

        self.weight = nn.Parameter(
            torch.empty(out_features, in_features, dtype=dtype or ARGS.model_dtype),
            requires_grad=False
        )

        if bias:
            if split_bias:
                # The bias length must match the output dimension of this
                # rank's weight shard. Fall back to `out_features` — not
                # `part_in_features`, which has the wrong dimension and does
                # not exist on the base class (previously an AttributeError
                # for a plain Linear with bias=True).
                bias_len = getattr(self, "part_out_features", out_features)
                self.bias = nn.Parameter(torch.empty(bias_len, dtype=ARGS.float_dtype))
            else:
                self.bias = nn.Parameter(torch.empty(self.out_features, dtype=ARGS.float_dtype))
        else:
            self.register_parameter("bias", None)

        if self.quant_enable:
            self.weight_scale = nn.Parameter(torch.empty(out_features, 1, dtype=torch.float32))
            self.weight_offset = nn.Parameter(torch.empty(out_features, 1, dtype=torch.float16))
            self.smooth_scales = torch.ones(in_features, dtype=torch.float16).npu()
            # Static (per-tensor) quantization needs precomputed dequant
            # params; deq_scale must be int64 for the NPU quant-matmul kernel.
            if not self.dynamic_enable:
                self.deq_scale = nn.Parameter(torch.empty(out_features, dtype=torch.int64), requires_grad=False)
                self.quant_bias = nn.Parameter(torch.empty(out_features, dtype=torch.int32), requires_grad=False)
                self.input_scale = nn.Parameter(torch.empty(in_features, dtype=torch.float32), requires_grad=False)
                self.input_offset = nn.Parameter(torch.empty(in_features, dtype=torch.int8), requires_grad=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the (possibly quantized) linear transform to `x`."""
        if self.quant_enable:
            if ARGS.hardware == '300I':
                output = quant_mm_300I(self, x, self.dynamic_enable)
            else:
                output = quant_mm_800I(self, x, self.dynamic_enable)

            if self.bias is not None:
                output += self.bias
            return output
        else:
            return F.linear(x, self.weight, self.bias)


def quant_mm_300I(self, x: torch.Tensor, dynamic_enable=False) -> torch.Tensor:
    """Quantized matmul + dequant for the 300I NPU.

    Flattens any leading batch dimensions to 2-D for the kernel, then
    restores them on the output. `dynamic_enable` selects per-token
    (dynamic) vs per-tensor (static) input quantization.

    Args:
        self: A `Linear`-style module carrying `weight`, `weight_scale`,
            `smooth_scales` and (static mode) `input_scale`.
        x: Activations of shape (..., in_features).
        dynamic_enable: Use per-token dynamic quantization when True.
    """
    orig_shape = x.shape
    needs_flatten = x.dim() > 2
    if needs_flatten:
        # The kernel expects 2-D input. Use the last dim (not a hard-coded
        # index 2) so tensors of any rank flatten correctly.
        x = x.reshape(-1, orig_shape[-1])

    output = torch_npu.npu_quant_matmul_dequant(
        x,
        self.weight,
        weight_scale=self.weight_scale.squeeze(),
        x_scale=None if dynamic_enable else self.input_scale,
        smooth_scale=self.smooth_scales,
        bias=None,
        quant_mode='pertoken' if dynamic_enable else 'pertensor'
    )

    if needs_flatten:
        # Restore every leading dim; -1 absorbs out_features.
        output = output.view(*orig_shape[:-1], -1)

    return output


def quant_mm_800I(self, x: torch.Tensor, dynamic_enable=False) -> torch.Tensor:
    """Quantized matmul for the 800I NPU.

    Dynamic mode quantizes activations per token on the fly; static mode
    uses the module's precomputed `input_scale` / `deq_scale` parameters.
    The output is dequantized back to the input's dtype.
    """
    out_dtype = x.dtype
    weight_t = self.weight.transpose(0, 1)  # lazy view, no copy

    if not dynamic_enable:
        # Static per-tensor activation quantization with precomputed scales.
        x_q = torch_npu.npu_quantize(x, self.input_scale, None, torch.qint8, -1, True)
        return torch_npu.npu_quant_matmul(
            x_q,
            weight_t,
            self.deq_scale,
            bias=None,
            output_dtype=out_dtype,
        )

    # Dynamic per-token quantization: scales derived from the input itself.
    x_q, pertoken_scale = torch_npu.npu_dynamic_quant(x)
    return torch_npu.npu_quant_matmul(
        x_q,
        weight_t,
        self.weight_scale,              # weight scale for per-token dequant
        pertoken_scale=pertoken_scale,  # activation scale per token
        bias=None,
        output_dtype=out_dtype,
    )


class ColumnParallelLinear(Linear):
    """Linear layer sharded along the output (column) dimension.

    Each tensor-parallel rank holds `out_features // tp_size` output
    columns; no communication is needed in the forward pass.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        tp_size: int,
        bias: bool = False,
        dtype: str = None,
    ):
        if out_features % tp_size != 0:
            raise ValueError(f"`out_features` ({out_features}) must be divisible by `tp_size` ({tp_size}).")

        # Assigned before super().__init__ so the base class can size the
        # bias from this rank's output partition.
        self.part_out_features = out_features // tp_size
        super().__init__(in_features, self.part_out_features, bias, dtype)


class RowParallelLinear(Linear):
    """Linear layer sharded along the input (row) dimension.

    Each rank multiplies its input slice by its weight shard; the partial
    products are summed with an all-reduce (when `reduce_results` is set)
    and the bias is applied exactly once, after the reduction.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        tp_size: int,
        bias: bool = False,
        dtype: str = None,
        reduce_results: bool = True
    ):
        if in_features % tp_size != 0:
            raise ValueError(f"`in_features` ({in_features}) must be divisible by `tp_size` ({tp_size}).")

        self.part_in_features = in_features // tp_size
        self.tp_size = tp_size
        self.reduce_results = reduce_results
        # split_bias=False: the bias spans the full output dimension and is
        # added post-reduction, so it must not be sharded.
        super().__init__(self.part_in_features, out_features, bias, dtype, split_bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if not self.quant_enable:
            partial = F.linear(x, self.weight)
        elif ARGS.hardware == '300I':
            partial = quant_mm_300I(self, x, self.dynamic_enable)
        else:
            partial = quant_mm_800I(self, x, self.dynamic_enable)

        if self.tp_size > 1 and self.reduce_results:
            dist.all_reduce(partial, group=ARGS.atten_tp_group)

        # After the all-reduce, so the bias is counted once, not once per rank.
        if self.bias is not None:
            partial += self.bias

        return partial


class RMSNorm(nn.Module):
    """Root-mean-square layer norm backed by NPU kernels.

    Called with a `residual`, returns `(normed, residual_sum)` where
    `residual_sum = x + residual` is what the next layer should feed back;
    called without one, returns only the normed tensor.
    """

    def __init__(self, hidden_size: int, eps: float = 1e-5):
        super().__init__()
        self.hidden_size = hidden_size
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(hidden_size, dtype=ARGS.float_dtype))

    def forward(self, x: torch.Tensor, residual: torch.Tensor = None):
        # Pick the hardware-specific implementation once per call.
        impl = self._forward_300I if ARGS.hardware == '300I' else self._forward_800I
        return impl(x, residual)

    def _forward_800I(self, x: torch.Tensor, residual: torch.Tensor = None):
        # 800I provides a fused residual-add + rms-norm kernel.
        if residual is None:
            normed, _ = torch_npu.npu_rms_norm(x, self.weight, self.eps)
            return normed
        normed, _, residual_sum = torch_npu.npu_add_rms_norm(x, residual, self.weight, self.eps)
        return normed, residual_sum

    def _forward_300I(self, x: torch.Tensor, residual: torch.Tensor = None):
        # 300I lacks the fused kernel: do the residual add separately.
        if residual is None:
            normed, _ = torch_npu.npu_rms_norm(x, self.weight, self.eps)
            return normed
        residual_sum = x + residual
        normed, _ = torch_npu.npu_rms_norm(residual_sum, self.weight, self.eps)
        return normed, residual_sum


class PanguRotaryEmbedding(nn.Module):
    """Rotary position embedding with a precomputed cos/sin lookup table.

    The cached table stores, per position, the cosines followed by the
    sines of the duplicated frequency vector, so `forward` recovers both
    halves with one `index_select` and a `chunk`.
    """

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: int,
        dtype: torch.dtype,
    ) -> None:
        super().__init__()
        self.head_size = head_size
        self.rotary_dim = rotary_dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.dtype = dtype
        # Non-persistent: cheap to rebuild, so keep it out of checkpoints.
        self.register_buffer(
            "cos_sin_cache", self._compute_cos_sin_cache().to(dtype), persistent=False
        )

    def _compute_inv_freq(self, base: Union[int, float]) -> torch.Tensor:
        # Standard RoPE inverse frequencies: base^(-2i / rotary_dim).
        exponents = torch.arange(0, self.rotary_dim, 2, dtype=torch.float32)
        return 1.0 / (base ** (exponents / self.rotary_dim))

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        # angles[p, i] = position p * inv_freq[i], duplicated to span the
        # full rotary dimension; table row = [cos(angles), sin(angles)].
        positions = torch.arange(self.max_position_embeddings, dtype=torch.float32)
        angles = torch.outer(positions, self._compute_inv_freq(self.base))
        angles = torch.cat((angles, angles), dim=-1)
        return torch.cat([angles.cos(), angles.sin()], dim=-1)

    def forward(
        self,
        positions: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Look up (cos, sin) for a (batch, seq_len) tensor of positions.

        Returns two tensors shaped (batch, seq_len, 1, rotary_dim), ready
        to broadcast across attention heads.
        """
        rows = self.cos_sin_cache.index_select(0, positions.flatten())
        rows = rows.view(positions.size(0), positions.size(1), 1, -1)
        return rows.chunk(2, dim=-1)


def apply_swiglu(x: torch.Tensor) -> torch.Tensor:
    """SwiGLU activation via the NPU kernel.

    On 300I hardware the kernel runs in fp32 and the result is cast back
    to fp16; other hardware runs in the input dtype.
    """
    if ARGS.hardware != '300I':
        return torch_npu.npu_swiglu(x)
    return torch_npu.npu_swiglu(x.to(torch.float32)).to(torch.float16)


def apply_gmm(
    sorted_hidden_states: torch.Tensor,
    w13_weight: torch.Tensor,
    w2_weight: torch.Tensor,
    group_list: torch.Tensor,
    group_list_type: int,
    topk_scales: torch.Tensor = None,
) -> torch.Tensor:
    """Unquantized MoE expert computation via grouped matmuls.

    Runs the gate/up projection, SwiGLU, optional top-k scaling, then the
    down projection, with per-expert batching driven by `group_list`.
    """

    def _grouped_mm(inputs: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        # Both projections share every grouped-matmul setting but the weight.
        return torch_npu.npu_grouped_matmul(
            x=[inputs],
            weight=[weight],
            split_item=2,
            group_list_type=group_list_type,
            group_type=0,
            group_list=group_list,
        )[0]

    activated = apply_swiglu(_grouped_mm(sorted_hidden_states, w13_weight))

    if topk_scales is not None:
        activated *= topk_scales

    return _grouped_mm(activated, w2_weight)


def apply_quant_gmm_pertoken_800I(
    sorted_hidden_states: torch.Tensor,
    w13_weight: torch.Tensor,
    w13_weight_scale: torch.Tensor,
    w13_weight_offset: torch.Tensor,
    w13_input_scale: torch.Tensor,
    w2_weight: torch.Tensor,
    w2_weight_scale: torch.Tensor,
    w2_weight_offset: torch.Tensor,
    w2_input_scale: torch.Tensor,
    group_list: torch.Tensor,
    original_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Per-token (dynamic) W8A8 MoE expert computation for the 800I NPU.

    Pipeline: dynamically quantize activations -> int8 grouped gate/up
    matmul accumulating in int32 -> fused dequant + SwiGLU + requant ->
    grouped down matmul with per-token dequant back to `original_dtype`.

    NOTE(review): `w13_weight_offset`, `w2_weight_offset` and the incoming
    `w2_input_scale` are never read here; presumably kept for signature
    parity with the per-tensor variant — confirm before removing.
    """

    # Quantize on the fly unless the input is already int8 like the weights;
    # the dynamic quant also yields the per-token activation scales.
    if sorted_hidden_states.dtype != w13_weight.dtype:
        quant_input, w13_input_scale = torch_npu.npu_dynamic_quant(sorted_hidden_states)
    else:
        quant_input = sorted_hidden_states

    # Gate/up projection kept in int32 so the fused op below can dequantize.
    gate_up_out = torch_npu.npu_grouped_matmul(
        x=[quant_input],
        weight=[w13_weight],
        split_item=2,
        group_list_type=1,
        group_type=0,
        group_list=group_list,
        output_dtype=torch.int32,
    )[0]

    # Fused dequant -> SwiGLU -> requant; returns int8 activations plus the
    # new per-token scales used to dequantize the down projection.
    quant_gate_up_out, w2_input_scale = torch_npu.npu_dequant_swiglu_quant(
        gate_up_out,
        weight_scale=w13_weight_scale,
        activation_scale=w13_input_scale,
        group_index=group_list,
        activate_left=True,
        quant_mode=1,
    )
        
    down_out = torch_npu.npu_grouped_matmul(
        x=[quant_gate_up_out],
        weight=[w2_weight],
        scale=[w2_weight_scale],
        per_token_scale=[w2_input_scale],
        split_item=2,
        group_list_type=1,
        group_type=0,
        group_list=group_list,
        output_dtype=original_dtype,
    )[0]

    return down_out
    

def apply_quant_gmm_pertensor_800I(
    sorted_hidden_states: torch.Tensor,
    w13_weight: torch.Tensor,
    w13_weight_scale: torch.Tensor,
    w13_weight_offset: torch.Tensor,
    w13_input_scale: torch.Tensor,
    w13_input_offset: torch.Tensor,
    w2_weight: torch.Tensor,
    w2_weight_scale: torch.Tensor,
    w2_weight_offset: torch.Tensor,
    w2_input_scale: torch.Tensor,
    w2_input_offset: torch.Tensor,
    group_list: torch.Tensor,
    original_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Per-tensor (static) W8A8 MoE expert computation for the 800I NPU.

    Pipeline: statically quantize activations -> int8 grouped gate/up
    matmul dequantized to `original_dtype` -> SwiGLU -> requantize ->
    grouped down matmul dequantized to `original_dtype`.

    NOTE(review): the `*_weight_offset` and `*_input_offset` parameters are
    never read; presumably kept for signature parity with other variants —
    confirm before removing.
    """

    # Quantize activations with the precomputed static scale unless they are
    # already int8 like the weights.
    if sorted_hidden_states.dtype != w13_weight.dtype:
        quant_input = torch_npu.npu_quantize(
            sorted_hidden_states,
            scales=w13_input_scale,
            zero_points=None,
            dtype=torch.qint8,
            axis=-1,
            div_mode=True
        )
    else:
        quant_input = sorted_hidden_states

    # Combined dequant scale = weight scale * scalar activation scale.
    gate_up_out = torch_npu.npu_grouped_matmul(
        x=[quant_input],
        weight=[w13_weight],
        scale=[w13_weight_scale * w13_input_scale[0]],
        split_item=2,
        group_list_type=1,
        group_type=0,
        group_list=group_list,
        output_dtype=original_dtype,
    )[0]

    gate_up_out = apply_swiglu(gate_up_out)

    # Requantize the activated output for the int8 down projection.
    if gate_up_out.dtype != w2_weight.dtype:
        quant_gate_up_out = torch_npu.npu_quantize(
            gate_up_out,
            scales=w2_input_scale,
            zero_points=None,
            dtype=torch.qint8,
            axis=-1,
            div_mode=True
        )
    else:
        quant_gate_up_out = gate_up_out

    down_out = torch_npu.npu_grouped_matmul(
        x=[quant_gate_up_out],
        weight=[w2_weight],
        scale=[w2_weight_scale*w2_input_scale[0]],
        split_item=2,
        group_list_type=1,
        group_type=0,
        group_list=group_list,
        output_dtype=original_dtype,
    )[0]

    return down_out


def apply_quant_gmm_pertensor_300I(
    sorted_hidden_states: torch.Tensor,
    w13: torch.Tensor,
    w13_scale: torch.Tensor,
    w13_input_scale: torch.Tensor,
    w13_input_offset: torch.Tensor,
    w2: torch.Tensor,
    w2_scale: torch.Tensor,
    w2_input_scale: torch.Tensor,
    w2_input_offset: torch.Tensor,
    group_list: torch.Tensor,
    up_smooth_scales: torch.Tensor,
    down_smooth_scales: torch.Tensor,
    topk_scales: torch.Tensor
) -> torch.Tensor:
    """Per-tensor (static) W8A8 MoE expert computation for the 300I NPU.

    Pipeline: quantized grouped gate/up matmul with static input scales ->
    SwiGLU -> top-k scaling -> zero-pad the feature dim to a multiple of 16
    (a 300I kernel requirement) -> quantized grouped down matmul.

    NOTE(review): `w13_input_offset` and `w2_input_offset` are never read;
    presumably kept for signature parity — confirm before removing.
    """

    gate_up_out = torch_npu.npu_quant_grouped_matmul_dequant(
        x=sorted_hidden_states,
        quantized_weight=w13,
        weight_scale=w13_scale,
        group_list=group_list,
        x_scale=w13_input_scale,
        smooth_scale=up_smooth_scales,
        bias=None,
        quant_mode='pertensor',
    )

    gate_up_out = apply_swiglu(gate_up_out)
    gate_up_out *= topk_scales

    # Align feature dimension (300I requires multiple of 16). Zero-pad the
    # activations and w2's input dim together so the extra columns contribute
    # nothing to the product. F.pad replaces the manual zeros+cat dance.
    remainder = gate_up_out.size(-1) % 16
    if remainder != 0:
        pad_size = 16 - remainder
        gate_up_out = F.pad(gate_up_out, (0, pad_size))
        w2 = F.pad(w2, (0, pad_size))

    down_out = torch_npu.npu_quant_grouped_matmul_dequant(
        x=gate_up_out,
        quantized_weight=w2,
        weight_scale=w2_scale,
        group_list=group_list,
        x_scale=w2_input_scale,
        smooth_scale=down_smooth_scales,
        bias=None,
        quant_mode='pertensor',
    )

    return down_out

def apply_quant_gmm_pertoken_300I(
    sorted_hidden_states: torch.Tensor,
    w13: torch.Tensor,
    w13_scale: torch.Tensor,
    w2: torch.Tensor,
    w2_scale: torch.Tensor,
    group_list: torch.Tensor,
    up_smooth_scales: torch.Tensor,
    down_smooth_scales: torch.Tensor,
    topk_scales: torch.Tensor
) -> torch.Tensor:
    """Per-token (dynamic) W8A8 MoE expert computation for the 300I NPU.

    Pipeline: quantized grouped gate/up matmul with per-token input
    quantization -> SwiGLU -> top-k scaling -> zero-pad the feature dim to
    a multiple of 16 (a 300I kernel requirement) -> quantized grouped down
    matmul.
    """

    gate_up_out = torch_npu.npu_quant_grouped_matmul_dequant(
        x=sorted_hidden_states,
        quantized_weight=w13,
        weight_scale=w13_scale,
        group_list=group_list,
        smooth_scale=up_smooth_scales,
        bias=None,
        quant_mode='pertoken',
    )

    gate_up_out = apply_swiglu(gate_up_out)
    gate_up_out *= topk_scales

    # Align feature dimension (300I requires multiple of 16). Zero-pad the
    # activations and w2's input dim together so the extra columns contribute
    # nothing to the product. F.pad replaces the manual zeros+cat dance.
    remainder = gate_up_out.size(-1) % 16
    if remainder != 0:
        pad_size = 16 - remainder
        gate_up_out = F.pad(gate_up_out, (0, pad_size))
        w2 = F.pad(w2, (0, pad_size))

    down_out = torch_npu.npu_quant_grouped_matmul_dequant(
        x=gate_up_out,
        quantized_weight=w2,
        weight_scale=w2_scale,
        group_list=group_list,
        smooth_scale=down_smooth_scales,
        bias=None,
        quant_mode='pertoken',
    )

    return down_out
