# =============================================================================
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import os
from dataclasses import dataclass
from glob import glob
from typing import Optional, Set, Tuple

import numpy as np
import torch
import torch.distributed as dist
import torch_npu
from safetensors.torch import safe_open
from torch import nn
from torch.nn.parameter import Parameter
from tqdm import tqdm

from layers import (
    ColumnParallelLinear,
    Linear,
    PanguRotaryEmbedding,
    ParallelEmbedding,
    RMSNorm,
    RowParallelLinear,
    apply_gmm,
    apply_quant_gmm_pertensor_300I,
    apply_quant_gmm_pertoken_300I,
    apply_quant_gmm_pertensor_800I,
    apply_quant_gmm_pertoken_800I,
    apply_swiglu,
)

from quant_tool import comm_quant
from utils import (
    ARGS,
    InferenceParams,
    all_gather,
    partition_input_dp,
    print_rank_0,
    random_sampling,
    reduce_scatter,
    quant_mapping
)
import utils


@dataclass
class ModelArgs:
    """Static configuration for the Pangu MoE model.

    Defaults describe the shipped checkpoint; any field can be overridden
    through the generated dataclass ``__init__``.
    """

    # Model architecture
    hidden_act: str = "silu"
    hidden_size: int = 5120
    num_hidden_layers: int = 48
    num_attention_heads: int = 40
    num_key_value_heads: int = 8
    num_experts: int = 64
    num_experts_per_tok: int = 8
    moe_intermediate_size: int = 1344
    shared_expert_intermediate_size: int = 5376
    rms_norm_eps: float = 1e-5
    # Position embedding
    model_max_seq_len: int = 131072
    max_position_embeddings: int = 131072
    rope_theta: float = 16000000.0
    # Vocabulary
    vocab_size: int = 153376
    pad_token_id: int = 0
    # Mapping from parameter name to quantization type; None = unquantized.
    # Fix: the original was unannotated, so @dataclass silently treated it
    # as a shared class attribute instead of a per-instance field.
    quant_config: Optional[dict] = None

class PanguAttention(nn.Module):
    """Grouped-query multi-head attention with rotary embeddings and a
    preallocated per-layer KV cache (optionally int8/C8-quantized).

    Q/K/V heads are sharded over ARGS.atten_tp_size tensor-parallel ranks.
    """

    def __init__(self, model_args: ModelArgs, layer_id):
        super().__init__()
        self.hidden_size = model_args.hidden_size
        self.num_heads = model_args.num_attention_heads
        # Query / KV heads owned by this tensor-parallel rank.
        self.num_local_heads = self.num_heads // ARGS.atten_tp_size
        self.num_kv_heads = model_args.num_key_value_heads
        self.num_local_kv_heads = model_args.num_key_value_heads // ARGS.atten_tp_size
        self.num_kv_groups = self.num_heads // self.num_kv_heads
        self.head_dim = self.hidden_size // self.num_heads

        # Per-rank widths used to split the fused QKV projection output.
        self.q_size = self.num_local_heads * self.head_dim
        self.kv_size = self.num_local_kv_heads * self.head_dim

        # Optional per-layer weight quantization types from the quant config.
        qkv_proj_quant_type, o_proj_quant_type = None, None
        if model_args.quant_config is not None:
            quant_config = model_args.quant_config
            qkv_proj_quant_type = quant_config[f"model.layers.{layer_id}.self_attn.q_proj.weight"]
            o_proj_quant_type = quant_config[f"model.layers.{layer_id}.self_attn.o_proj.weight"]

        # Fused Q/K/V projection, column-sharded over attention TP ranks.
        self.qkv_proj = ColumnParallelLinear(
            self.hidden_size,
            self.head_dim * (self.num_heads + 2 * self.num_kv_heads),
            tp_size=ARGS.atten_tp_size,
            bias=True,
            dtype=qkv_proj_quant_type
        )
        # Output projection; under H2P the cross-rank reduction is deferred
        # to the caller (reduce_results disabled).
        self.o_proj = RowParallelLinear(
            self.num_heads * self.head_dim,
            self.hidden_size,
            tp_size=ARGS.atten_tp_size,
            bias=True,
            dtype=o_proj_quant_type,
            reduce_results=not ARGS.H2P
        )
        self.scale = self.head_dim ** -0.5
        # Static KV cache sized for the prompt plus all generated tokens.
        kv_cache_length = ARGS.en_sequence_len + ARGS.max_new_tokens
        kv_shape = (ARGS.mini_batch_size, self.num_local_kv_heads, kv_cache_length, self.head_dim)

        # int8 KV cache when calibrated scales are available (and we are not
        # currently calibrating), or when the quant config requests C8.
        self.quant_cache = (ARGS.kv_scale_path is not None and not ARGS.kv_quant_calibrate) or \
            (model_args.quant_config is not None and model_args.quant_config["kv_quant_type"] == 'C8')
        kv_dtype = torch.int8 if self.quant_cache else ARGS.float_dtype

        self.register_buffer("k_cache", torch.zeros(kv_shape, dtype=kv_dtype), persistent=False)
        self.register_buffer("v_cache", torch.zeros(kv_shape, dtype=kv_dtype), persistent=False)

        if self.quant_cache:
            # The separated kv cache is converted by the use_internal_format_weight function.
            # NOTE(review): forward() reads self.k_cache_scale / self.v_cache_scale,
            # which are not created here — presumably split out of these fused
            # tensors by use_internal_format_weight (defined elsewhere); verify
            # that it always runs before inference.
            self.kv_cache_scale = nn.Parameter(torch.empty(2, self.num_local_kv_heads * self.head_dim, dtype=torch.float16), requires_grad=False)
            self.kv_cache_offset = nn.Parameter(torch.empty(2, self.num_local_kv_heads * self.head_dim, dtype=torch.float16), requires_grad=False)

    def forward(
        self,
        x: torch.Tensor,
        cos: torch.Tensor, 
        sin: torch.Tensor, 
        mask: Optional[torch.Tensor],
        inference_params: InferenceParams = None
    ) -> torch.Tensor:
        """Run attention for one prefill or decode step.

        Args:
            x: token-major (TD) hidden states; the (batch, seq, hidden)
               shape is recovered from inference_params.input_shape.
            cos, sin: rotary embedding tables for the current positions.
            mask: attention mask passed through to the flash-attention ops.
            inference_params: carries shapes, prefill/decode flag, batch
               offset and KV-cache write positions.

        Returns:
            o_proj output in token-major (TD) layout.
        """
        # BSH -> TD
        batch_size, seq_len, hidden_dim = inference_params.input_shape
        qkv = self.qkv_proj(x)

        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # TD -> BSND
        q = q.view(batch_size, seq_len, self.num_local_heads, self.head_dim).contiguous()
        k = k.view(batch_size, seq_len, self.num_local_kv_heads, self.head_dim).contiguous()
        v = v.view(batch_size, seq_len, self.num_local_kv_heads, self.head_dim).contiguous()

        q, k = torch_npu.npu_apply_rotary_pos_emb(
                q, k, cos.contiguous(), sin.contiguous()
                )
        # Prefill stage
        if inference_params.is_prefill:
            batch_start = inference_params.batch_size_offset
            batch_end = batch_start + batch_size

            # kv quant: quantize K/V to int8 before caching; attention itself
            # still runs on the unquantized k/v below.
            if self.quant_cache:
                quant_k = torch_npu.npu_quantize(k.view(batch_size, seq_len, -1), self.k_cache_scale.view(-1), None, torch.qint8, -1, True)
                quant_v = torch_npu.npu_quantize(v.view(batch_size, seq_len, -1), self.v_cache_scale.view(-1), None, torch.qint8, -1, True)

                quant_k = quant_k.view(batch_size, seq_len, self.num_local_kv_heads, self.head_dim).transpose(1, 2).contiguous()
                quant_v = quant_v.view(batch_size, seq_len, self.num_local_kv_heads, self.head_dim).transpose(1, 2).contiguous()

            # BSND -> BNSD for the flash-attention op and the cache layout.
            q = q.transpose(1, 2).contiguous()
            k = k.transpose(1, 2).contiguous()
            v = v.transpose(1, 2).contiguous()
            
            kv_len = k.shape[2]
            # Full mini-batch under graph compile: use the in-place scatter op
            # so the cache update stays inside the compiled graph.
            if batch_end - batch_start == ARGS.mini_batch_size and ARGS.compile:
                zero_start_pos = torch.zeros(ARGS.mini_batch_size, dtype=torch.int32, device=k.device)
                torch_npu.scatter_update_(self.k_cache, zero_start_pos, quant_k if self.quant_cache else k, axis=2)
                torch_npu.scatter_update_(self.v_cache, zero_start_pos, quant_v if self.quant_cache else v, axis=2)
            else:
                self.k_cache[batch_start:batch_end, :, :kv_len] = quant_k if self.quant_cache else k
                self.v_cache[batch_start:batch_end, :, :kv_len] = quant_v if self.quant_cache else v

            attn_output = torch_npu.npu_prompt_flash_attention(
                q, k, v,
                num_heads=self.num_local_heads,
                num_key_value_heads=self.num_local_kv_heads,
                input_layout="BNSD",
                atten_mask=mask.bool(),
                scale_value=self.scale,
            )
            # BNSD -> TD
            attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size * seq_len, -1)

        # Decode stage
        else:
            # Quantize the single new K/V step before writing it to the cache.
            if self.quant_cache:
                k = torch_npu.npu_quantize(k.view(batch_size, seq_len, -1), self.k_cache_scale.view(-1), None, torch.qint8, -1, True)
                v = torch_npu.npu_quantize(v.view(batch_size, seq_len, -1), self.v_cache_scale.view(-1), None, torch.qint8, -1, True)

            q = q.reshape(batch_size, self.num_local_heads, 1, self.head_dim)
            k = k.reshape(batch_size, self.num_local_kv_heads, 1, self.head_dim)
            v = v.reshape(batch_size, self.num_local_kv_heads, 1, self.head_dim)

            # Write the new step at the per-sequence positions computed by
            # PanguModel._prepare_inference_params (start_pos - 1).
            torch_npu.scatter_update_(self.k_cache, inference_params.updated_kv_positions, k, axis=2)
            torch_npu.scatter_update_(self.v_cache, inference_params.updated_kv_positions, v, axis=2)
            
            # repeat kv for 300I with old CANN TODO remove here
            attn_output = torch_npu.npu_incre_flash_attention(
                q, self.k_cache, self.v_cache,
                num_heads=self.num_local_heads,
                num_key_value_heads=self.num_local_kv_heads,
                scale_value=self.scale,
                atten_mask=mask,
                input_layout="BNSD",
                antiquant_scale=self.kv_cache_scale if self.quant_cache else None,
                antiquant_offset=self.kv_cache_offset if self.quant_cache else None,
            )
            attn_output = attn_output.view(batch_size * seq_len, -1)
            
        output = self.o_proj(attn_output)
        return output


class PanguMLP(nn.Module):
    """Shared-expert MLP: fused gate/up projection, SwiGLU, down projection.

    When the quant dtype string contains "DYNAMIC" and the target hardware
    is 800I, forward() runs a fused per-token int8 pipeline instead of the
    plain floating-point path.
    """

    def __init__(self, hidden_size: int, moe_intermediate_size: int, reduce_results: bool = False, dtype: str = None):
        super().__init__()

        # Per-token dynamic quantization flag, derived from the dtype string.
        self.dynamic_enable = dtype is not None and "DYNAMIC" in dtype

        self.gate_up_proj = ColumnParallelLinear(
            hidden_size,
            moe_intermediate_size * 2,
            tp_size=ARGS.share_expert_tp_size,
            bias=False,
            dtype=dtype
        )
        self.down_proj = RowParallelLinear(
            moe_intermediate_size,
            hidden_size,
            tp_size=ARGS.share_expert_tp_size,
            bias=False,
            dtype=dtype,
            reduce_results=reduce_results
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        use_fused_quant = ARGS.hardware == '800I' and self.dynamic_enable
        if not use_fused_quant:
            # Plain path: project up, activate, project back down.
            activated = apply_swiglu(self.gate_up_proj(x))
            return self.down_proj(activated)

        out_dtype = x.dtype
        # Quantize activations per token, then run both matmuls in int8.
        x_int8, token_scale = torch_npu.npu_dynamic_quant(x)
        gate_up_int32 = torch_npu.npu_quant_matmul(
            x_int8,
            self.gate_up_proj.weight.transpose(0,1),
            scale=self.gate_up_proj.weight_scale,
            bias=None,
            output_dtype=torch.int32,
        )
        # Fused dequant + SwiGLU + requant feeding the down projection.
        swiglu_int8, swiglu_scale = torch_npu.npu_dequant_swiglu_quant(
            gate_up_int32,
            weight_scale=self.gate_up_proj.weight_scale,
            activation_scale=token_scale,
            activate_left=True,
            quant_mode=1
        )
        return torch_npu.npu_quant_matmul(
            swiglu_int8,
            self.down_proj.weight.transpose(0,1),
            scale=self.down_proj.weight_scale,
            pertoken_scale=swiglu_scale,
            bias=None,
            output_dtype=out_dtype,
        )


class ExpertBalancer(torch.nn.Module):
    """Load-balanced top-k expert selection.

    Experts are split into num_groups groups of experts_per_group. Each
    token "votes" for its highest-scoring expert inside every group; per
    group, only the k experts with the most votes (aggregated over the
    whole batch) remain selectable, and every token then takes its best
    remaining expert in each group.
    """

    def __init__(self, k: int, num_experts: int, num_groups: int):
        super().__init__()
        if num_experts % num_groups != 0:
            raise ValueError("num_experts must be divisible by num_groups.")

        self.k = k
        self.num_experts = num_experts
        self.num_groups = num_groups
        self.experts_per_group = num_experts // num_groups

        # (1, 1, experts_per_group): in-group expert ids for vote matching.
        self.group_expert_indices = torch.arange(
            self.experts_per_group, dtype=torch.int32
        ).view(1, 1, -1).npu()

        # (1, num_groups): offset mapping an in-group index to a global id.
        self.group_expert_offset = (
            torch.arange(num_groups, dtype=torch.int32) * self.experts_per_group
        ).unsqueeze(0).npu()

        # (experts_per_group,): used to expand top-k picks into a keep mask.
        self.expert_index_range = torch.arange(
            self.experts_per_group, dtype=torch.int32
        ).npu()

    def forward(self, scores: torch.Tensor):
        """Return (topk_weight, topk_idx), one entry per (token, group).

        Args:
            scores: (num_tokens, num_experts) routing scores.

        Returns:
            topk_weight: (num_tokens, num_groups) selected scores.
            topk_idx: (num_tokens, num_groups) global expert ids (int32).
        """
        num_tokens = scores.size(0)
        scores_grouped = scores.view(num_tokens, self.num_groups, self.experts_per_group)
        best_expert_idx = torch.argmax(scores_grouped, dim=2)  # (num_tokens, num_groups)
        # One-hot vote per (token, group) for that token's best expert.
        vote_mask = (
            best_expert_idx.unsqueeze(-1).to(torch.int32) == self.group_expert_indices
        ).to(ARGS.float_dtype)

        # (num_groups, experts_per_group): votes summed over all tokens.
        expert_vote_freq = vote_mask.sum(dim=0)

        # Keep the k most-voted experts of each group. The trailing
        # unsqueeze(0) broadcasts the same keep set to every token.
        sorted_indices = torch.argsort(expert_vote_freq, dim=1, descending=True).to(torch.int32)
        topk_experts = sorted_indices[:, :self.k]
        keep_mask = (
            (topk_experts.unsqueeze(-1) == self.expert_index_range).any(dim=1)
        ).unsqueeze(0)

        # Dropped experts score 0, so max() below only picks kept experts.
        masked_scores = torch.where(keep_mask, scores_grouped, 0)

        topk_weight, best_pos_in_group = masked_scores.max(dim=2)
        best_pos_in_group = best_pos_in_group.to(torch.int32)

        # Convert in-group positions to global expert ids.
        topk_idx = (best_pos_in_group + self.group_expert_offset).to(torch.int32)

        return topk_weight, topk_idx


class PanguFusedMoeBlock(nn.Module):
    """Routed experts executed as grouped matmuls (GMM).

    Holds the stacked expert weights for this EP/TP partition and runs
    routing + expert computation along one of two hardware paths: the fused
    800I path (npu_moe_init_routing_v2 / npu_moe_finalize_routing) or the
    300I path (manual sort / index_select around the GMM helpers).
    """

    def __init__(self, model_args, dtype:str):
        super().__init__()
        
        # Experts, intermediate width and routing groups owned by this rank.
        self.local_num_experts = model_args.num_experts // ARGS.routed_expert_ep_size
        self.intermediate_size_per_partition = model_args.moe_intermediate_size // ARGS.routed_expert_tp_size
        self.local_num_groups = model_args.num_experts_per_tok // ARGS.routed_expert_ep_size
        self.num_experts_per_tok = model_args.num_experts_per_tok

        # Decode quant flags from the dtype string (e.g. "W8A8", "DYNAMIC").
        # NOTE(review): self.dynamic_enable is only assigned when dtype is
        # not None; every later read is short-circuited behind
        # self.quant_enable, but an unguarded read would raise
        # AttributeError — consider initializing it to False here as well.
        self.quant_enable = False
        if dtype is not None:
            self.quant_enable = "W8A8" in dtype
            self.dynamic_enable = "DYNAMIC" in dtype
            dtype = quant_mapping[dtype]
        else:
            dtype = ARGS.model_dtype

        # Fused gate+up weights: (experts, hidden, 2 * intermediate).
        self.w13_weight = Parameter(torch.empty(
            self.local_num_experts,
            model_args.hidden_size,
            2 * self.intermediate_size_per_partition,
            dtype=dtype), requires_grad=False)

        # Down-projection weights: (experts, intermediate, hidden).
        self.w2_weight = Parameter(torch.empty(
            self.local_num_experts,
            self.intermediate_size_per_partition,
            model_args.hidden_size,
            dtype=dtype), requires_grad=False)
        
        self.zero_tensor = torch.zeros(1, dtype=torch.int32).npu()

        self.expert_balancer = ExpertBalancer(
            k=ARGS.expert_balance_k,
            num_experts=self.local_num_experts,
            num_groups=self.local_num_groups
        )
        
        if self.quant_enable:
            self._init_quant_params(model_args)

        # Maps each group's argmax index to a global expert id (fast path).
        self.topk_id_bias = torch.arange(
            0, self.local_num_experts, self.num_experts_per_tok, 
            dtype=torch.int32
        ).npu().unsqueeze(0)
        self.experts_id = torch.arange(0, self.local_num_experts, dtype=torch.int32).npu()

    def _init_quant_params(self, model_args):
        """Allocate W8A8 quantization scales/offsets/biases for all experts."""
        h, i = model_args.hidden_size, self.intermediate_size_per_partition
        e = self.local_num_experts

        def param(shape, dtype):
            return Parameter(torch.empty(*shape, dtype=dtype), requires_grad=False)

        self.w13_weight_scale = param((e, 2 * i), torch.float32)
        self.w13_weight_offset = param((e, 2 * i), torch.float16)
        self.w2_weight_scale = param((e, h), torch.float32)
        self.w2_weight_offset = param((e, h), torch.float16)
        # notice gmm tiling error
        if ARGS.global_batch_size != 1:
            self.up_smooth_scales = torch.ones(h, dtype=torch.float16).npu()
            self.down_smooth_scales = torch.ones(i, dtype=torch.float16).npu()
        else:
            self.up_smooth_scales = None
            self.down_smooth_scales = None
        # deq_scale must be int64
        # Static (per-tensor) quantization needs extra input/dequant params.
        if not self.dynamic_enable:
            self.w2_deq_scale = param((e, h), torch.int64)
            self.w13_deq_scale = param((e, 2 * i), torch.int64)
            self.w2_input_scale = param((e, i), torch.float32)
            self.w13_input_scale = param((e, h), torch.float32)
            self.w2_input_offset = param((e, i), torch.int8)
            self.w13_input_offset = param((e, h), torch.int8)
            self.w13_quant_bias = param((e, 2 * i), torch.int32)
            self.w2_quant_bias = param((e, h), torch.int32)

    def forward(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        router_scale: torch.Tensor,
        inference_params: InferenceParams = None,
    ) -> torch.Tensor:
        """Route tokens to local experts and return the combined output.

        Args:
            hidden_states: (num_tokens, hidden) activations.
            router_logits: (num_tokens, num_experts) gate outputs.
            router_scale: per-expert scaling applied to routing weights.
            inference_params: used only for the prefill/decode flag.
        """
        num_tokens, _ = hidden_states.shape

        # Softmax over all experts, then slice out this EP rank's experts.
        scores = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
        scores = scores[
            ..., 
            ARGS.routed_expert_ep_rank * self.local_num_experts : 
            (ARGS.routed_expert_ep_rank + 1) * self.local_num_experts
        ].to(ARGS.float_dtype)

        # Decode with balancing enabled: batch-level balanced selection;
        # otherwise plain per-group argmax.
        if not inference_params.is_prefill and ARGS.expert_balance_k < 8:
            topk_weights, topk_ids = self.expert_balancer(scores)
        else:
            topk_weights, topk_ids = scores.view(num_tokens, self.local_num_groups, -1).max(-1)
            topk_ids = topk_ids.to(torch.int32) + self.topk_id_bias

        flatten_topk_ids = topk_ids.view(-1)
        # Apply the learned per-expert router scaling to the weights.
        topk_weights *= router_scale.index_select(0, flatten_topk_ids).view(topk_ids.shape)

        if ARGS.hardware == '800I':
            # Fused routing path: quantize (optionally), expand tokens per
            # expert, run grouped matmuls, then gather-and-weight back.
            original_dtype = hidden_states.dtype
            if self.quant_enable:
                if self.dynamic_enable:
                    hidden_states, w13_input_scale = torch_npu.npu_dynamic_quant(hidden_states)
                else:
                    hidden_states = torch_npu.npu_quantize(hidden_states,
                        self.w13_input_scale, None, torch.qint8, -1, True
                    )

            expanded_x, expanded_row_idx, expert_token_count, expanded_scale = torch_npu.npu_moe_init_routing_v2(
                hidden_states,
                topk_ids,
                scale=w13_input_scale if (self.quant_enable and self.dynamic_enable) else None,
                active_num=topk_ids.numel(),
                expert_capacity=-1,
                expert_num=self.local_num_experts,
                drop_pad_mode=0,
                expert_tokens_num_type=1,
                expert_tokens_num_flag=True,
                quant_mode=-1,
                active_expert_range=[0, self.local_num_experts],
                row_idx_type=0,
            )

            if self.quant_enable:
                if self.dynamic_enable:
                    down_out = apply_quant_gmm_pertoken_800I(
                        expanded_x, self.w13_weight, self.w13_weight_scale, self.w13_weight_offset,
                        expanded_scale, self.w2_weight, self.w2_weight_scale, self.w2_weight_offset,
                        None, expert_token_count, original_dtype=original_dtype
                    )
                else:
                    down_out = apply_quant_gmm_pertensor_800I(
                        expanded_x, self.w13_weight, self.w13_weight_scale, self.w13_weight_offset,
                        self.w13_input_scale, self.w13_input_offset, self.w2_weight, 
                        self.w2_weight_scale, self.w2_weight_offset, self.w2_input_scale, 
                        self.w2_input_offset, expert_token_count, original_dtype=original_dtype
                    )
            else:
                down_out = apply_gmm(
                    expanded_x, self.w13_weight, self.w2_weight,
                    group_list=expert_token_count, group_list_type=1
                )

            # Scatter expert outputs back to token order, weighted by the
            # routing weights.
            unsorted_hidden_states = torch_npu.npu_moe_finalize_routing(
                down_out.to(torch.float16), skip1=None, skip2=None, bias=None,
                scales=topk_weights.to(torch.float16),
                expanded_src_to_dst_row=expanded_row_idx,
                export_for_source_row=topk_ids,
                drop_pad_mode=2
            ).to(ARGS.float_dtype)
        else:
            # 300I path: sort tokens by expert id, run grouped matmuls on the
            # sorted layout, then invert the sort and sum per token.
            sorted_topk_ids = torch.argsort(flatten_topk_ids.float()).to(torch.int32) + self.zero_tensor
            sorted_hidden_states = hidden_states.index_select(0, sorted_topk_ids // self.local_num_groups)
            num_tokens_per_expert = (flatten_topk_ids.unsqueeze(-1) == self.experts_id).to(torch.float32).sum(0)

            topk_scales = topk_weights.view(-1).index_select(0, sorted_topk_ids).unsqueeze(-1)
            # Cumulative token counts per expert, as required by the GMM API.
            group_list = num_tokens_per_expert.cumsum(dim=0).to(torch.int64)

            if self.quant_enable:
                if self.dynamic_enable:
                    down_out = apply_quant_gmm_pertoken_300I(
                        sorted_hidden_states,
                        self.w13_weight, self.w13_weight_scale, 
                        self.w2_weight, self.w2_weight_scale, 
                        group_list, self.up_smooth_scales, self.down_smooth_scales,
                        topk_scales=topk_scales
                    )
                else:
                    down_out = apply_quant_gmm_pertensor_300I(
                        sorted_hidden_states,
                        self.w13_weight, self.w13_weight_scale, self.w13_input_scale, self.w13_input_offset,
                        self.w2_weight, self.w2_weight_scale, self.w2_input_scale, self.w2_input_offset,
                        group_list, self.up_smooth_scales, self.down_smooth_scales,
                        topk_scales=topk_scales
                    )
            else:
                down_out = apply_gmm(
                    sorted_hidden_states,
                    self.w13_weight, self.w2_weight,
                    group_list=group_list, group_list_type=0,
                    topk_scales=topk_scales
                )

            # Undo the expert sort and sum the per-group expert outputs.
            unsorted_topk_ids = torch.argsort(sorted_topk_ids.float()).to(torch.int32) + self.zero_tensor
            unsorted_hidden_states = down_out.index_select(0, unsorted_topk_ids)
            unsorted_hidden_states = unsorted_hidden_states.view(
                num_tokens, self.num_experts_per_tok // ARGS.routed_expert_ep_size, -1
            ).sum(1)

        return unsorted_hidden_states


class PanguSparseMoeBlock(nn.Module):
    """Sparse MoE block: softmax gate, routed fused experts, plus a shared
    expert whose output is added to the routed result."""

    def __init__(self, model_args, layer_id):
        super().__init__()
        self.num_experts = model_args.num_experts
        self.top_k = model_args.num_experts_per_tok

        # Router producing one logit per expert.
        self.gate = nn.Linear(
            model_args.hidden_size,
            model_args.num_experts,
            bias=False,
            dtype=ARGS.float_dtype
        )

        # Per-layer quant dtypes for routed and shared experts, if configured.
        routed_expert_dtype = None
        share_expert_dtype = None
        if model_args.quant_config is not None:
            cfg = model_args.quant_config
            routed_expert_dtype = cfg[f"model.layers.{layer_id}.mlp.experts.0.gate_proj.weight"]
            share_expert_dtype = cfg[f"model.layers.{layer_id}.mlp.shared_expert.gate_proj.weight"]

        self.experts = PanguFusedMoeBlock(model_args, routed_expert_dtype)
        self.shared_expert = PanguMLP(
            model_args.hidden_size,
            model_args.shared_expert_intermediate_size,
            reduce_results=False,
            dtype=share_expert_dtype
        )
        # Learned per-expert scaling applied to routing weights.
        self.router_scale = torch.nn.Parameter(
            torch.ones((model_args.num_experts,), dtype=ARGS.float_dtype),
            requires_grad=False
        )
        self.gate_zero_bias = torch.zeros((1,), dtype=torch.float32).npu()

    def forward(self, hidden_states: torch.Tensor, inference_params: InferenceParams = None) -> torch.Tensor:
        router_logits = self.gate(hidden_states) + self.gate_zero_bias

        shared_out = self.shared_expert(hidden_states)

        routed_out = self.experts(
            hidden_states,
            router_logits,
            self.router_scale,
            inference_params
        )

        combined = routed_out + shared_out

        # Outside H2P mode each rank holds a partial sum; reduce across ranks.
        if ARGS.world_size > 1 and not ARGS.H2P:
            dist.all_reduce(combined)

        return combined


class PanguDecoderLayer(nn.Module):
    """One decoder layer: RMSNorm -> attention -> RMSNorm -> sparse MoE,
    with residual threading and the H2P / DP communication pattern woven
    into the forward pass.
    """

    def __init__(self, layer_id: int, model_args: ModelArgs):
        super().__init__()
        self.model_args = model_args
        self.layer_id = layer_id
        self.num_layers = model_args.num_hidden_layers

        self.self_attn = PanguAttention(model_args, layer_id)
        self.mlp = PanguSparseMoeBlock(model_args, layer_id)

        self.input_layernorm = RMSNorm(model_args.hidden_size, model_args.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(model_args.hidden_size, model_args.rms_norm_eps)
        # Zero offset used by the quantized all_gather in forward().
        self.comm_offset = torch.zeros(model_args.hidden_size, dtype=torch.float).npu()
    
    def forward(
        self,
        hidden_states: torch.Tensor,
        residual: Optional[torch.Tensor],
        cos: torch.Tensor, 
        sin: torch.Tensor, 
        mask: Optional[torch.Tensor],
        inference_params: InferenceParams = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run one decoder layer; returns (hidden_states, residual) in
        token-major (TD) layout.

        residual is None on the first layer (and set back to None by the
        last layer under H2P), in which case the pre-norm input becomes
        the new residual.
        """
        # BSH -> TD
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            # Fused add-and-norm when a residual is carried in.
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        
        # Record activation statistics when calibrating communication quant.
        if ARGS.comm_quant_calibrate:
            comm_quant(layer_id=self.layer_id - 1, activation=hidden_states, indice=1)

        # Under H2P, layers after the first receive TP-sharded tokens and
        # must gather (and un-pad) them before attention.
        if ARGS.H2P and self.layer_id > 0:
            hidden_states = all_gather(hidden_states, ARGS.atten_tp_group, ARGS.atten_tp_size)
            if inference_params.pad_len != 0:
                hidden_states = hidden_states.index_select(dim=0, index=inference_params.unpad_idx)

        hidden_states = self.self_attn(hidden_states, cos, sin, mask, inference_params)
        
        if ARGS.H2P:
            # Re-pad so the token count divides evenly across TP ranks.
            if inference_params.pad_len != 0:
                hidden_states = hidden_states.index_select(dim=0, index=inference_params.pad_idx)
            
            hidden_states = reduce_scatter(hidden_states, ARGS.atten_tp_group, ARGS.atten_tp_size)
            
            # First layer: shard the residual the same way as hidden_states.
            if self.layer_id == 0:
                if inference_params.pad_len != 0:
                    residual = residual.index_select(dim=0, index=inference_params.pad_idx)
                residual = torch.tensor_split(residual, ARGS.atten_tp_size)[ARGS.atten_tp_rank]

        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)

        if ARGS.comm_quant_calibrate:
           comm_quant(layer_id=self.layer_id, activation=hidden_states, indice=0)

        if ARGS.H2P:
            # Quantized gather only during decode, when scales are available
            # and we are not calibrating.
            # NOTE(review): self.comm_scale1 is not defined in this class;
            # presumably attached when loading weights/scales — verify.
            quant_flag = ARGS.comm_scale_path is not None and not ARGS.comm_quant_calibrate and not inference_params.is_prefill
            hidden_states = all_gather(
                hidden_states,
                ARGS.all_rank_group,
                ARGS.world_size,
                quant_flag=quant_flag,
                scale=self.comm_scale1 if quant_flag else None,
                offset=self.comm_offset if quant_flag else None
            )
        else:
            hidden_states = all_gather(hidden_states, ARGS.atten_dp_group, ARGS.atten_dp_size)

        hidden_states = self.mlp(hidden_states, inference_params)

        if ARGS.H2P:
            hidden_states = reduce_scatter(hidden_states, ARGS.all_rank_group, ARGS.world_size)
            # last layer need to all gather
            if self.layer_id == self.num_layers - 1:
                hidden_states = hidden_states + residual
                residual = None
                if ARGS.comm_quant_calibrate:
                    comm_quant(layer_id=self.layer_id, activation=hidden_states, indice=1)
                hidden_states = all_gather(hidden_states, ARGS.atten_tp_group, ARGS.atten_tp_size)

                if inference_params.pad_len != 0:
                    hidden_states = hidden_states.index_select(dim=0, index=inference_params.unpad_idx)
        else:
            # dp need to slice data
            hidden_states = hidden_states.chunk(ARGS.atten_dp_size, dim=0)[ARGS.atten_dp_rank]

        return hidden_states, residual


class PanguModel(nn.Module):
    """Token embedding + rotary embedding + stacked decoder layers + final
    RMSNorm."""

    def __init__(self, model_args: ModelArgs):
        super().__init__()
        self.embed_tokens = ParallelEmbedding(model_args.vocab_size, model_args.hidden_size)
        self.num_hidden_layers = model_args.num_hidden_layers
        
        self.hidden_size = model_args.hidden_size
        self.num_heads = model_args.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.rotary_emb = PanguRotaryEmbedding(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position_embeddings=model_args.max_position_embeddings,
            base=model_args.rope_theta,
            dtype=ARGS.float_dtype,
        )
        
        self.layers = nn.ModuleList([
            PanguDecoderLayer(layer_id, model_args)
            for layer_id in range(self.num_hidden_layers)
        ])
        self.norm = RMSNorm(model_args.hidden_size, model_args.rms_norm_eps)

    def forward(
        self,
        tokens: torch.Tensor,
        position_ids: torch.Tensor,
        start_pos: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        inference_params: Optional[InferenceParams] = None
    ) -> torch.Tensor:
        """Run the full decoder stack.

        Returns hidden states reshaped back to the original
        (batch, seq, hidden) layout recorded in inference_params.input_shape.
        """
        # BSH
        hidden_states = self.embed_tokens(tokens)
        residual = None
        cos, sin = self.rotary_emb(position_ids)

        self._prepare_inference_params(hidden_states, start_pos, inference_params)

        for layer in self.layers:
            hidden_states, residual = layer(
                # BSH->TD
                hidden_states.view(-1, hidden_states.size(-1)),
                residual,
                cos, 
                sin,
                mask=attention_mask,
                inference_params=inference_params
            )

        # Final norm; fuse the residual add when one is still carried.
        if residual is None:
            hidden_states = self.norm(hidden_states)
        else:
            hidden_states, _ = self.norm(hidden_states, residual)

        # TD-> BSH
        return hidden_states.view(inference_params.input_shape)

    def _prepare_inference_params(self, hidden_states: torch.Tensor, start_pos: torch.Tensor, inference_params: Optional[InferenceParams]):
        """Record input shape, KV-cache write positions and TP padding
        indices on inference_params. No-op when it is None."""
        if inference_params is None:
            return
        # update position
        # Decode writes its new KV entry at (start_pos - 1) per sequence.
        inference_params.updated_kv_positions = start_pos - 1

        # tokens padding 
        batch_size, seq_len, _ = hidden_states.shape
        token_len = batch_size * seq_len
        inference_params.input_shape = hidden_states.shape
        inference_params.token_len = token_len
        
        # world_size = atten_dp_size * atten_tp_size
        # Pad the token count up to a multiple of atten_tp_size so tensors
        # split evenly across TP ranks.
        pad_len = (ARGS.atten_tp_size - (token_len % ARGS.atten_tp_size)) % ARGS.atten_tp_size
        inference_params.pad_len = pad_len

        if pad_len > 0:
            device = hidden_states.device
            unpad_idx = torch.arange(token_len, dtype=torch.int32, device=device)
            # Padding rows reuse token 0; index_select with unpad_idx later
            # drops the padded rows again.
            pad_idx = torch.cat([
                unpad_idx,
                torch.zeros(pad_len, dtype=torch.int32, device=device)
            ])
            inference_params.pad_idx = pad_idx
            inference_params.unpad_idx = unpad_idx

class PanguForCausalLM(nn.Module):
    """Causal-LM head on top of ``PanguModel``.

    Wraps the transformer backbone with a vocab-parallel ``lm_head``
    (column-parallel over ``ARGS.world_size`` ranks) and implements the
    prefill/decode inference flow, next-token sampling over sharded
    logits, and safetensors checkpoint loading.
    """

    def __init__(self, model_args: ModelArgs):
        super().__init__()
        # First vocab id owned by this rank's lm_head shard; used in
        # _sample_next_token to turn a local argmax index into a global id.
        self.vocab_shard_offset = (model_args.vocab_size // ARGS.world_size) * ARGS.rank
        self.num_experts = model_args.num_experts
        self.model = PanguModel(model_args)
        self.lm_head = ColumnParallelLinear(
            model_args.hidden_size,
            model_args.vocab_size,
            dtype="FLOAT" if ARGS.float_dtype == torch.float16 else "BFLOAT",
            tp_size=ARGS.world_size,
            bias=False
        )

    def forward(self,
        tokens: torch.Tensor,
        position_ids: torch.Tensor,
        start_pos: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        inference_params: Optional[InferenceParams] = None) -> torch.Tensor:
        """Run one inference step and return the sampled next-token id(s).

        Dispatches to the prefill path or the single-step decode path based
        on ``inference_params.is_prefill``.
        """
        # NOTE(review): inference_params is annotated Optional but is
        # dereferenced unconditionally here — callers must always pass it.
        if inference_params.is_prefill:
            return self._forward_prefill(tokens, position_ids, start_pos, attention_mask, inference_params)
        else:
            return self.decode(tokens, position_ids, start_pos, attention_mask, inference_params)

    def _forward_prefill(self,
        tokens: torch.Tensor,
        position_ids: torch.Tensor,
        start_pos: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        inference_params: InferenceParams) -> torch.Tensor:
        """Prefill path: shard the batch over attention-DP ranks, run the
        backbone in micro-batches, gather last-token states, then sample.
        """
        # Keep only this attention-DP rank's slice of the batch.
        tokens, position_ids, attention_mask, start_pos = partition_input_dp(
            tokens=tokens,
            position_ids=position_ids,
            attention_mask=attention_mask,
            start_pos=start_pos
        )

        # support multi-prefill: run the local batch in micro-batches of
        # prefill_batch_size // atten_dp_size sequences each.
        hidden_states_list = []
        step = ARGS.prefill_batch_size // ARGS.atten_dp_size
        for i in range(0, tokens.size(0), step):
            hidden_states = self.prefill(
                tokens[i:i + step],
                position_ids[i:i + step],
                start_pos[i:i + step],
                attention_mask[i:i + step],
                inference_params
            )
            # Advance the KV-cache batch offset past this micro-batch.
            inference_params.batch_size_offset += step
            # Only each sequence's last prompt token feeds the LM head.
            hidden_states = self._select_last_token(hidden_states, start_pos[i:i + step])
            hidden_states_list.append(hidden_states)
        hidden_states = torch.cat(hidden_states_list, dim=0)

        # Reassemble the full batch across attention-DP ranks.
        hidden_states = all_gather(
            hidden_states,
            group=ARGS.atten_dp_group,
            world_size=ARGS.atten_dp_size
        )

        logits = self.lm_head(hidden_states)
        return self._sample_next_token(logits, inference_params)

    def _select_last_token(
        self,
        hidden_states: torch.Tensor,
        start_pos: torch.Tensor
    ) -> torch.Tensor:
        """Pick, per sequence, the hidden state of its last prompt token.

        Input and output are batch-first (B, S, H); the result has S == 1.
        """
        # BSH-> SBH so torch.gather can index along the sequence dim (dim 0).
        hidden_states = hidden_states.transpose(0, 1).contiguous()
        seq_len, batch_size, hidden_size = hidden_states.shape

        if seq_len > 1:
            # Prefill: prompts may differ in length; start_pos marks the end.
            select_index = start_pos
        else:
            # Single-token input: the only valid position is 0 (ones - 1).
            select_index = torch.ones_like(start_pos)

        # Gather row (select_index - 1) of each sequence, expanded over H.
        last_inds = (select_index[None, :, None] - 1).repeat(1, 1, hidden_size)
        selected_hidden = torch.gather(hidden_states, 0, last_inds)
        # SBH -> BSH
        return selected_hidden.transpose(0, 1).contiguous()

    def prefill(self,
        tokens: torch.Tensor,
        position_ids: torch.Tensor,
        start_pos: torch.Tensor,
        attention_mask: torch.Tensor = None,
        inference_params: InferenceParams = None) -> torch.Tensor:
        """Run the backbone on a (micro-batch of a) prompt batch."""
        return self.model(tokens, position_ids, start_pos, attention_mask, inference_params)

    def decode(self,
        tokens: torch.Tensor,
        position_ids: torch.Tensor,
        start_pos: torch.Tensor,
        attention_mask: torch.Tensor = None,
        inference_params: InferenceParams = None) -> torch.Tensor:
        """Decode path: one backbone step on this rank's DP slice, then
        gather hidden states across attention-DP ranks and sample.
        """
        # Keep only this attention-DP rank's slice of the batch.
        tokens, position_ids, attention_mask, start_pos = partition_input_dp(
            tokens=tokens,
            position_ids=position_ids,
            attention_mask=attention_mask,
            start_pos=start_pos
        )

        hidden_states = self.model(tokens, position_ids, start_pos, attention_mask, inference_params)

        # Reassemble the full batch across attention-DP ranks.
        hidden_states = all_gather(
            hidden_states,
            group=ARGS.atten_dp_group,
            world_size=ARGS.atten_dp_size
        )

        logits = self.lm_head(hidden_states)
        return self._sample_next_token(logits, inference_params)

    def _sample_next_token(self, logits: torch.Tensor, inference_params: InferenceParams) -> torch.Tensor:
        """Sample the next token id from vocab-sharded logits.

        ``logits`` holds only this rank's vocab shard (column-parallel head).
        With temperature > 0 the full logits are gathered and sampled
        stochastically; otherwise a distributed argmax exchanges only one
        (logit, id) candidate pair per rank.
        """
        if inference_params.temperature > 0:
            # Gather every rank's shard, then reorder (world, B, V/world)
            # -> (B, world * V/world): shards concatenated per batch row.
            all_logits = all_gather(logits, group=None, world_size=ARGS.world_size)
            all_logits = all_logits.view(ARGS.world_size, -1, all_logits.shape[-1]).transpose(0, 1).reshape(logits.shape[0], -1)
            next_token = random_sampling(
                all_logits,
                temperature=inference_params.temperature,
                top_k=inference_params.top_k,
                top_p=inference_params.top_p
            )
            return next_token

        # Deterministic (argmax) mode
        if ARGS.world_size > 1:
            # Local argmax over this rank's shard, shifted to a global id.
            local_max_logits, local_max_index = torch.max(logits, dim=-1, keepdim=True)
            local_max_index += self.vocab_shard_offset
            # Pack (logit, id) pairs so a single all_gather carries both.
            # NOTE(review): the id round-trips through float32 — exact only
            # while vocab ids fit float32's 24-bit mantissa; verify for
            # vocab sizes beyond ~16.7M.
            combined = torch.cat([local_max_logits.to(torch.float32), local_max_index.to(torch.float32)], dim=-1)

            all_max_logits = all_gather(combined, group=None, world_size=ARGS.world_size)
            # (world, B, 2) -> (B, world, 2): one candidate pair per rank.
            all_max_logits = all_max_logits.view(ARGS.world_size, -1, 2).transpose(0, 1)

            # Winning rank per batch row (column 0 = logit), then take its
            # token id (column 1) as int64.
            max_indices = torch.argmax(all_max_logits[:, :, 0], dim=-1)
            next_token = all_max_logits[torch.arange(all_max_logits.size(0)), max_indices][:, 1:].to(torch.int64)
        else:
            next_token = logits.argmax(dim=-1)

        return next_token

    def load_weights(self, file_path: str) -> None:
        """Load all ``*.safetensors`` shards found under ``file_path``.

        Checkpoint tensor names are remapped onto the model's fused
        parameters: q/k/v -> qkv_proj, gate/up -> gate_up_proj, and
        per-expert weights onto the stacked MoE parameters. Prints a
        summary of any parameters left unloaded.

        Raises:
            FileNotFoundError: if no .safetensors files exist in file_path.
            KeyError: if an expert/general checkpoint entry has no matching
                model parameter.
        """
        # Parameters to ignore
        ignore_param_list = {
            "input_layernorm.bias",
            "input_layernorm.module.bias",
            "input_layernorm.module.weight",
            "post_attention_layernorm.bias",
            "post_attention_layernorm.module.bias",
            "post_attention_layernorm.module.weight",
            "model.norm.bias",
            "model.norm.module.bias",
            "model.norm.module.weight",
        }

        # Mapping for stacked parameters (e.g., q, k, v)
        # (fused param name, checkpoint shard name, shard id within the fused param)
        stacked_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]

        # Expert parameter mapping (e.g., MoE)
        expert_params_mapping = utils.make_expert_params_mapping(
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.num_experts
        )

        params_dict = dict(self.named_parameters())
        loaded_params: Set[str] = set()

        ckpt_files = glob(os.path.join(file_path, "*.safetensors"))
        if not ckpt_files:
            raise FileNotFoundError(f"No checkpoint files found in path: {file_path}")

        for ckpt_file in tqdm(ckpt_files, desc="Loading model weights"):
            with safe_open(ckpt_file, framework="pt", device="cpu") as f:
                for name in f.keys():
                    # Skip parameters from excess layers (if any)
                    if 'layers' in name:
                        try:
                            layer_idx = int(name.split('layers.')[-1].split('.')[0])
                            if layer_idx >= self.model.num_hidden_layers:
                                continue
                        except (ValueError, IndexError):
                            pass  # Ignore malformed layer names

                    # Skip ignored params
                    if any(skip_name in name for skip_name in ignore_param_list):
                        continue

                    weight_tensor = f.get_tensor(name).cpu()

                    # Check for stacked parameter match
                    for param_name, shard_name, shard_id in stacked_params_mapping:
                        # C8 support: quantized kv_cache entries keep their
                        # original name minus the "<shard>." segment.
                        if "kv_cache" in name and shard_name in name:
                            mapped_name = name.replace(shard_name+'.', '')
                            # If mapped_name is absent the tensor is dropped by
                            # the break; it will appear in missing_params below.
                            if mapped_name in params_dict:
                                param = params_dict[mapped_name]
                                utils.stacked_weight_loader(param, weight_tensor, mapped_name, shard_id)
                                loaded_params.add(mapped_name)
                            break
                        # qkv_proj and gate_up
                        elif shard_name in name and "mlp.experts" not in name:
                            mapped_name = name.replace(shard_name, param_name)
                            if mapped_name in params_dict:
                                param = params_dict[mapped_name]
                                utils.stacked_weight_loader(param, weight_tensor, mapped_name, shard_id)
                                loaded_params.add(mapped_name)
                            break

                    # for/else: this else runs only when NO stacked mapping
                    # matched (i.e. the loop finished without a break).
                    else:
                        # Try expert parameter mapping
                        matched = False
                        for param_name, weight_name, expert_id, shard_id in expert_params_mapping:
                            if weight_name in name:
                                mapped_name = name.replace(weight_name, param_name)
                                if mapped_name not in params_dict:
                                    raise KeyError(f"Expert param '{mapped_name}' not found in model parameters.")
                                param = params_dict[mapped_name]
                                utils.expert_weight_loader(param, weight_tensor, mapped_name, shard_id, expert_id)
                                loaded_params.add(mapped_name)
                                matched = True
                                break

                        # Handle general parameters
                        if not matched:
                            if name not in params_dict:
                                raise KeyError(f"Parameter '{name}' not found in model parameters.")
                            param = params_dict[name]
                            utils.others_weight_loader(param, weight_tensor, name)
                            loaded_params.add(name)

        # Validation: Check loading completeness
        missing_params = set(params_dict.keys()) - loaded_params
        if not missing_params:
            print_rank_0("All weights were loaded successfully!")
        else:
            print_rank_0(f"Some weights failed to load: {missing_params}")
