# =============================================================================
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import warnings
warnings.filterwarnings("ignore", category=UserWarning)

import json
import os
import random
from argparse import ArgumentParser
from dataclasses import fields
from pathlib import Path
from typing import List, Tuple

import numpy as np
import torch
import torch.distributed as dist
import torch_npu
import torchair
from torch import Tensor


# Maps checkpoint quantization tags to the torch dtype used for the weights.
quant_mapping = {
    "FLOAT": torch.float16,
    "BFLOAT": torch.bfloat16,
    "W8A8": torch.int8,
    "W8A8_DYNAMIC": torch.int8,
}

# Global argument namespace; starts empty and is populated in place by
# set_args() so modules that imported ARGS earlier observe the parsed values.
ARGS = ArgumentParser().parse_args([])
def set_args():
    """Parse command-line arguments into the global ARGS namespace.

    Also validates quantization flag combinations, detects the NPU hardware
    generation (300I vs 800I) and derives the model/compute dtypes.

    Raises:
        ValueError: on invalid flag combinations, an unsupported device,
            or an unsupported checkpoint dtype.
    """
    global ARGS
    parser = ArgumentParser(description="Model inference configuration")

    # === Input Parameters ===
    parser.add_argument("--ckpt-path", type=str, default=None)

    parser.add_argument("--input-texts", action="store_true",
                        help="Use a list of texts as the input")
    parser.add_argument("--input-file", type=str, default=None,
                        help="Path to JSONL file, each line must contain a JSON object with a 'prompt' field")
    parser.add_argument("--input-dummy", action="store_true",
                        help="Use randomly generated dummy input")
    parser.add_argument("--deterministic", action="store_true",
                        help="Enable deterministic dummy input (used with --input-dummy)")

    parser.add_argument("--global-batch-size", type=int, required=True)
    parser.add_argument("--prefill-batch-size", type=int, default=None)
    parser.add_argument("--en-sequence-len", type=int, default=2048)

    parser.add_argument("--max-new-tokens", type=int, default=128)
    parser.add_argument("--streaming", action="store_true", default=False)
    parser.add_argument("--temperature", type=float, default=0)
    parser.add_argument("--top-k", type=int, default=-1)
    parser.add_argument("--top-p", type=float, default=0.8)

    # === Parallelism Parameters ===
    parser.add_argument("--atten-tp-size", type=int, default=8)
    parser.add_argument("--atten-dp-size", type=int, default=1)
    parser.add_argument("--routed-expert-tp-size", type=int, default=1)
    parser.add_argument("--routed-expert-ep-size", type=int, default=8)
    parser.add_argument("--share-expert-tp-size", type=int, default=8)
    parser.add_argument("--share-expert-ep-size", type=int, default=1)

    # === Compilation Parameters ===
    parser.add_argument("--cache-compile-path", type=str, default=None)
    parser.add_argument("--compile",action="store_true", default=False,
                        help='Compile model to speed up inference.')

    # === Environment Parameters ===
    parser.add_argument("--ckpt-dtype", type=str, default="float16",
                        choices=["bfloat16", "float16", "int8"])
    parser.add_argument("--fast-ckpt-path", type=str, default=None)
    parser.add_argument("--profiling-path", type=str, default=None)
    parser.add_argument("--H2P", action="store_true", default=False)
    parser.add_argument("--expert-balance-k", type=int, default=8)
    parser.add_argument("--benchmark-type", type=str, default=None,
                        choices=["prefill", "decode"])

    # === Quantization Parameters ===
    parser.add_argument("--kv-quant-calibrate", action="store_true", default=False)
    parser.add_argument("--comm-quant-calibrate", action="store_true", default=False)
    parser.add_argument("--kv-scale-path", type=str, default=None)
    parser.add_argument("--comm-scale-path", type=str, default=None)

    args = parser.parse_args()
    # Update the existing namespace in place so references to ARGS held by
    # other modules see the parsed values.
    ARGS.__dict__.update(vars(args))

    # === Argument Validations ===
    if (ARGS.comm_quant_calibrate or ARGS.comm_scale_path is not None) and not ARGS.H2P:
        raise ValueError("Quantization communication requires H2P mode.")
    if ARGS.comm_quant_calibrate and not ARGS.comm_scale_path:
        raise ValueError("When 'comm_quant_calibrate' is set, 'comm_scale_path' must be specified.")
    if ARGS.kv_quant_calibrate and not ARGS.kv_scale_path:
        raise ValueError("When 'kv_quant_calibrate' is set, 'kv_scale_path' must be specified.")

    # === Device Information ===
    # Hardware generation is inferred from the NPU device name string.
    device_name = torch.npu.get_device_name()
    if "310" in device_name:
        ARGS.hardware = "300I"
        if hasattr(ARGS, "benchmark_type") and ARGS.benchmark_type == "decode":
            ARGS.expert_balance_k = 5
        # 300I needs a broadcast workaround; patches torch.distributed.
        communication_adaptation_300I()
    elif "910" in device_name:
        ARGS.hardware = "800I"
        # NOTE(review): this overwrites any user-supplied --expert-balance-k
        # on 800I — confirm that is intentional.
        ARGS.expert_balance_k = 8
    else:
        raise ValueError("Unsupported device: Only 300I and 800I are supported.")

    # === Dtype Mapping ===
    dtype_mapping = {
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "int8": torch.int8,
    }

    if ARGS.ckpt_dtype not in dtype_mapping:
        raise ValueError("Unsupported ckpt dtype. Choose from: float16, bfloat16, int8")

    # Handle 300I not supporting bfloat16
    if ARGS.hardware == "300I" and ARGS.ckpt_dtype == "bfloat16":
        print_rank_0("Warning: 300I does not support bfloat16. Falling back to float16.")
        ARGS.ckpt_dtype = "float16"

    # model_dtype is the weight storage dtype; float_dtype is the dtype used
    # for floating-point compute (int8 checkpoints compute in fp16).
    ARGS.model_dtype = dtype_mapping[ARGS.ckpt_dtype]
    ARGS.float_dtype = torch.float16 if ARGS.model_dtype == torch.int8 else ARGS.model_dtype


class InferenceParams:
    """Inference-time sampling parameters and per-generation state passed to
    the main model so the context can be efficiently calculated and stored.
    """

    def __init__(self, temperature: float = 0.0, top_k: int = 1, top_p: float = 1.0) -> None:
        # Offset into the batch dimension of pre-allocated state.
        self.batch_size_offset = 0
        # True while processing the initial prompt (prefill) pass.
        self.is_prefill = True
        # Sampling hyper-parameters.
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p


def init_parallel_group():
    """Create all communication process groups and record this rank's
    coordinates in each parallel dimension on the global ARGS.

    Builds, in order: routed-expert EP/TP groups, shared-expert EP/TP groups,
    and attention DP/TP groups. torch.distributed.new_group is a collective —
    every rank must execute the same sequence of new_group calls, so do not
    reorder or skip these loops.
    """
    ARGS.world_size = dist.get_world_size() if dist.is_initialized() else 1
    ARGS.rank = dist.get_rank() if dist.is_initialized() else 0
    # Group spanning all ranks, for global collectives.
    ARGS.all_rank_group = torch.distributed.new_group(ranks=range(ARGS.world_size))

    rank = ARGS.rank
    world_size = ARGS.world_size
    # Rank layout: TP is the fastest-varying dimension, EP/DP the slower one.
    ARGS.routed_expert_ep_rank = rank // ARGS.routed_expert_tp_size
    ARGS.routed_expert_tp_rank = rank % ARGS.routed_expert_tp_size

    # initial routed ep group: strided ranks sharing the same TP position
    all_routed_ep_groups = []
    for j in range(0, ARGS.routed_expert_tp_size):
        ranks = range(j, world_size, ARGS.routed_expert_tp_size)
        all_routed_ep_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.routed_expert_ep_group = group

    # initial routed tp group: contiguous blocks of routed_expert_tp_size ranks
    all_routed_tp_groups = []
    for j in range(0, ARGS.routed_expert_ep_size):
        ranks = range(j * ARGS.routed_expert_tp_size, (j + 1) * ARGS.routed_expert_tp_size)
        all_routed_tp_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.routed_expert_tp_group = group

    print_rank_0(f"All routed TP groups: {all_routed_tp_groups}")
    print_rank_0(f"All routed EP groups: {all_routed_ep_groups}")

    ARGS.share_expert_ep_rank = rank // ARGS.share_expert_tp_size
    ARGS.share_expert_tp_rank = rank % ARGS.share_expert_tp_size

    # initial share ep group
    all_share_ep_groups = []
    for j in range(0, ARGS.share_expert_tp_size):
        ranks = range(j, world_size, ARGS.share_expert_tp_size)
        all_share_ep_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.share_expert_ep_group = group

    # initial share tp group
    all_share_tp_groups = []
    for j in range(0, ARGS.share_expert_ep_size):
        ranks = range(j * ARGS.share_expert_tp_size, (j + 1) * ARGS.share_expert_tp_size)
        all_share_tp_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.share_expert_tp_group = group

    print_rank_0(f"All share TP groups: {all_share_tp_groups}")
    print_rank_0(f"All share EP groups: {all_share_ep_groups}")

    ARGS.atten_dp_rank = rank // ARGS.atten_tp_size
    ARGS.atten_tp_rank = rank % ARGS.atten_tp_size

    # initial atten dp group
    all_atten_dp_groups = []
    for j in range(0, ARGS.atten_tp_size):
        ranks = range(j, world_size, ARGS.atten_tp_size)
        all_atten_dp_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.atten_dp_group = group

    # initial atten tp group
    all_atten_tp_groups = []
    for j in range(0, ARGS.atten_dp_size):
        ranks = range(j * ARGS.atten_tp_size, (j + 1) * ARGS.atten_tp_size)
        all_atten_tp_groups.append(list(ranks))
        group = torch.distributed.new_group(ranks)
        if rank in ranks:
            ARGS.atten_tp_group = group

    print_rank_0(f"All attention TP groups: {all_atten_tp_groups}")
    print_rank_0(f"All attention DP groups: {all_atten_dp_groups}")


def compile_graph(model):
    """Wrap model.prefill / model.decode with torch.compile using the
    torchair NPU backend, optionally caching compiled graphs on disk.

    Prefill is compiled with dynamic shapes (variable sequence length);
    decode with static shapes. When ARGS.cache_compile_path is set,
    torchair's disk-cached compile is used instead of plain torch.compile.

    Args:
        model: object exposing `prefill` and `decode` callables; both are
            replaced in place with their compiled wrappers.

    Returns:
        The same model object, with compiled entry points.
    """
    import torchair
    if torch.__version__ < "2.3.1":
        try:
            torchair.patch_for_hcom()
        except Exception as err:
            print_rank_0(f"[Warning] torchair.patch_for_hcom() failed: {err}, using legacy patch...")
            # Legacy fallback: these imports register HCCL converters as a
            # side effect.
            import torchair.ge_concrete_graph.ge_converter.experimental.patch_for_hcom_allreduce
            import torchair.ge_concrete_graph.ge_converter.experimental.hcom_allgather
        print_rank_0("[Info] HCOM patch enabled for compatibility.")

    config = torchair.CompilerConfig()
    config.experimental_config.frozen_parameter = True
    config.experimental_config.tiling_schedule_optimize = True
    torch.npu.set_compile_mode(jit_compile=False)
    torch._dynamo.config.cache_size_limit = 64

    npu_backend = torchair.get_npu_backend(compiler_config=config)
    prefill_compile_kwargs = dict(fullgraph=True, backend=npu_backend)
    decode_compile_kwargs = dict(fullgraph=True, backend=npu_backend)

    # Default wrapper is plain torch.compile; switch to torchair's on-disk
    # cached variant when a cache directory was supplied. (The previous
    # null_compile placeholders were dead code — the wrappers were always
    # overwritten — so they have been removed.)
    compile_wrapper = torch.compile
    if ARGS.cache_compile_path is not None:
        compile_wrapper = torchair.inference.cache_compile
        prefill_compile_kwargs["config"] = config
        decode_compile_kwargs["config"] = config
        prefill_cache_path = os.path.join(ARGS.cache_compile_path, "prefill_cache_graph")
        decode_cache_path = os.path.join(ARGS.cache_compile_path, "decode_cache_graph")
        os.makedirs(prefill_cache_path, exist_ok=True)
        os.makedirs(decode_cache_path, exist_ok=True)
        prefill_compile_kwargs['cache_dir'] = prefill_cache_path
        decode_compile_kwargs['cache_dir'] = decode_cache_path

    # Prefill sees varying sequence lengths -> dynamic shapes; decode runs
    # with fixed shapes -> static compilation.
    prefill_compile_kwargs['dynamic'] = True
    prefill_compile_kwargs['fullgraph'] = True
    decode_compile_kwargs['dynamic'] = False
    decode_compile_kwargs['fullgraph'] = True

    print_rank_0(f"prefill compile with: {compile_wrapper} {prefill_compile_kwargs}")
    print_rank_0(f"decode compile with: {compile_wrapper} {decode_compile_kwargs}")
    model.prefill = compile_wrapper(model.prefill, **prefill_compile_kwargs)
    model.decode = compile_wrapper(model.decode, **decode_compile_kwargs)
    print_rank_0("=" * 80)
    print_rank_0("Preparing torchair graph compilation...".center(80))
    print_rank_0("=" * 80)
    return model


def generate_attention(tokens: Tensor) -> Tensor:
    """Build a boolean causal attention mask for `tokens`.

    Returns a (batch, 1, seq, seq) tensor where True marks positions that
    must NOT be attended to (i.e. strictly above the diagonal).
    """
    batch, seq = tokens.shape
    causal = torch.ones((seq, seq), device=tokens.device).tril()
    causal = causal[None, None, :, :].expand(batch, 1, seq, seq)
    # Values below 0.5 are the zeros above the diagonal -> masked-out spots.
    return causal < 0.5


def partition_input_dp(
        tokens: Tensor,
        position_ids: Tensor,
        attention_mask: Tensor,
        start_pos: Tensor,
        ):
    """Slice the global batch into this rank's attention-DP shard.

    Each tensor is split along dim 0 into atten_dp_size chunks and this rank
    keeps the atten_dp_rank-th chunk. attention_mask and start_pos may be
    None (e.g. during speculative decoding) and are then passed through.
    """
    size = ARGS.atten_dp_size
    rank = ARGS.atten_dp_rank

    def shard(t):
        # This rank's chunk along the batch dimension.
        return t.chunk(size)[rank]

    tokens = shard(tokens)
    position_ids = shard(position_ids)
    attention_mask = shard(attention_mask) if attention_mask is not None else None
    start_pos = shard(start_pos) if start_pos is not None else None
    return tokens, position_ids, attention_mask, start_pos


def batch_padding(global_batch_size_before_padding: int) -> None:
    """Round the global batch size up to a multiple of atten_dp_size and
    derive the per-DP-rank mini batch and the prefill batch size on ARGS.

    Raises:
        ValueError: when an explicit prefill_batch_size is not divisible by
            atten_dp_size.
    """
    factor = ARGS.atten_dp_size
    # Ceiling-divide then multiply back: smallest multiple of `factor`
    # that is >= the requested global batch size.
    num_chunks = (global_batch_size_before_padding + factor - 1) // factor
    ARGS.global_batch_size_after_padding = int(num_chunks * factor)
    ARGS.mini_batch_size = ARGS.global_batch_size_after_padding // ARGS.atten_dp_size

    # Default the prefill batch to the padded global batch.
    if ARGS.prefill_batch_size is None:
        ARGS.prefill_batch_size = ARGS.global_batch_size_after_padding

    if ARGS.prefill_batch_size % factor != 0:
        raise ValueError(
            f"[Invalid prefill_batch_size] Must be divisible by "
            f"atten_dp_size ({factor}), "
            f"but got {ARGS.prefill_batch_size}."
        )


def reduce_scatter(input_: Tensor, group, world_size: int) -> Tensor:
    """Sum-reduce `input_` across `group` and return this rank's slice of
    the batch dimension (shape[0] shrinks by a factor of world_size).

    Raises:
        ValueError: when the batch dimension is not divisible by world_size.
    """
    # Single rank: nothing to communicate.
    if world_size == 1:
        return input_

    if input_.shape[0] % world_size != 0:
        raise ValueError(f"Input batch size ({input_.shape[0]}) must be divisible by world_size ({world_size}).")

    # Same dtype/device as the input, one world_size-th of the batch.
    output_ = input_.new_empty((input_.shape[0] // world_size,) + tuple(input_.shape[1:]))

    # Under graph compilation the op kwarg is omitted (SUM is the default);
    # eager mode passes it explicitly.
    if ARGS.compile:
        torch.distributed.reduce_scatter_tensor(output_, input_, group=group)
    else:
        torch.distributed.reduce_scatter_tensor(output_, input_, group=group, op=torch.distributed.ReduceOp.SUM)

    return output_


def all_gather(
    input_: Tensor,
    group,
    world_size: int,
    quant_flag: bool = False,
    scale: Tensor = None,
    offset: Tensor = None
) -> Tensor:
    """All-gather `input_` across `group` along dim 0.

    When quant_flag is set, the tensor is quantized to int8 before the
    collective (reducing communication volume) and dequantized afterwards
    using `scale`/`offset`.
    """
    # Single rank: nothing to communicate.
    if world_size == 1:
        return input_

    gathered_shape = (world_size * input_.shape[0],) + tuple(input_.shape[1:])

    if not quant_flag:
        output_ = torch.empty(gathered_shape, dtype=input_.dtype, device=input_.device)
        torch.distributed.all_gather_into_tensor(output_, input_.contiguous(), group=group)
        return output_

    # Quantized path: int8 over the wire, dequantized after the collective.
    output_ = torch.empty(gathered_shape, dtype=torch.int8, device=input_.device)
    quantized = torch_npu.npu_quantize(input_, scale, None, torch.qint8, -1, True)
    torch.distributed.all_gather_into_tensor(output_, quantized.contiguous(), group=group)
    return torch_npu.npu_anti_quant(output_, scale, offset=offset, dst_dtype=ARGS.float_dtype)


def communication_adaptation_300I():
    """Monkey-patch torch.distributed.broadcast for 300I hardware.

    Broadcast is emulated via all_gather: every rank gathers all tensors,
    then copies the source rank's tensor into its own buffer in place.
    """

    def broadcast_300I(tensor, src, group=None, async_op=False):
        rank = torch.distributed.get_rank(group)
        world_size = torch.distributed.get_world_size(group)
        tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
        # Reuse the caller's tensor as this rank's slot to avoid a copy.
        tensor_list[rank] = tensor
        torch.distributed.all_gather(tensor_list, tensor, group=group)
        # In-place write so callers holding a reference to `tensor` see it.
        tensor[...] = tensor_list[src]
        if async_op:
            # NOTE(review): NullHandle is not defined in this file's visible
            # scope — presumably a no-op work handle defined elsewhere;
            # confirm it exists before relying on async_op=True.
            return NullHandle()
        else:
            return None

    torch.distributed.broadcast = broadcast_300I
    print_rank_0("[Info] torch.distributed.broadcast has been patched for 300I compatibility.")


def random_sampling(logits, temperature=1.0, top_k=None, top_p=0.8, fast_sampling=False, seed=-1):
    """Sample one token id per row of `logits` with temperature / top-k / top-p.

    Args:
        logits: (batch, vocab) unnormalized scores.
        temperature: softmax temperature; must be non-zero unless top_k == 1
            (the greedy path returns before the division).
        top_k: None or int <= 0 for no top-k filtering; int > 0 keeps the k
            best tokens; a per-row tensor applies an individual k per sample.
        top_p: nucleus threshold in (0, 1).
        fast_sampling: approximate top-p by scaling the random threshold
            instead of masking the tail (see _get_random_values).
        seed: >= 0 for a reproducible draw; < 0 broadcasts rank 0's draw.

    Returns:
        (batch, 1) tensor of sampled token indices.
    """
    # Greedy decoding: top_k == 1 short-circuits to argmax.
    if isinstance(top_k, int) and top_k == 1:
        return torch.argmax(logits, dim=-1)[:, None]

    logits_scaled = logits / temperature

    if top_k is None or (isinstance(top_k, int) and top_k <= 0):
        # No top-k filtering. (Previously top_k=None — the declared default —
        # fell through to NotImplementedError; None now means "no filtering".)
        topk_logits = logits_scaled
        source_indices = None
    elif isinstance(top_k, int):
        topk_logits, source_indices = torch.topk(logits_scaled, top_k, dim=-1)
    elif isinstance(top_k, torch.Tensor):
        # Per-row k: take the largest k overall, then mask each row's excess.
        max_top_k = top_k.max().item()
        topk_logits, source_indices = torch.topk(logits_scaled, max_top_k, dim=-1)

        # Mask values beyond individual top_k
        range_mask = torch.cumsum(
            torch.ones_like(topk_logits, dtype=torch.int32), dim=-1
        ) > top_k.unsqueeze(-1)
        topk_logits[range_mask] = torch.finfo(topk_logits.dtype).min
    else:
        raise NotImplementedError("Unsupported type for top_k")

    probs = torch.softmax(topk_logits, dim=-1)
    cumulative_probs = torch.cumsum(probs, dim=-1)

    if 0 < top_p < 1 and not fast_sampling:
        # Standard top-p (nucleus) sampling: drop tokens past the cumulative
        # threshold, always keeping the first (most probable) token.
        remove_mask = cumulative_probs > top_p
        remove_mask[..., 1:] = remove_mask[..., :-1].clone()
        remove_mask[..., 0] = 0
        topk_logits += remove_mask.to(logits.dtype) * torch.finfo(logits.dtype).min
        probs = torch.softmax(topk_logits, dim=-1)
        cumulative_probs = torch.cumsum(probs, dim=-1)

    # Inverse-CDF sampling: pick the first index whose cumulative
    # probability exceeds the random threshold.
    rand = _get_random_values(seed, cumulative_probs, logits, top_p, fast_sampling)

    mask = cumulative_probs > rand
    mask_int = mask.to(torch.int)
    sampled_index = torch.argmax(mask_int, dim=-1)[:, None]

    # Map positions back to vocabulary ids when top-k reordered them.
    if source_indices is not None:
        return torch.gather(source_indices, -1, sampled_index)
    else:
        return sampled_index


def _get_random_values(seed, cumulative_probs, logits, top_p, fast_sampling):
    shape = cumulative_probs.shape[:-1]
    device = logits.device
    dtype = logits.dtype

    if seed < 0:
        rand = torch.empty(shape, device=device, dtype=dtype)
        if dist.get_rank() == 0:
            rand.uniform_()
        torch.distributed.broadcast(rand, src=0)
        rand = rand[:, None]
    else:
        torch.manual_seed(seed)
        rand = torch.rand(shape, device=device, dtype=dtype)[:, None]

    if 0 < top_p < 1 and fast_sampling:
        rand = rand * top_p

    return rand


def print_rank_0(*args, **kwargs):
    """Print (flushed) only on rank 0; always print when torch.distributed
    is not initialized (e.g. single-process runs)."""
    should_print = (not dist.is_initialized()) or dist.get_rank() == 0
    if should_print:
        print(*args, flush=True, **kwargs)


def print_model(model):
    """Dump the model structure and a table of its parameters (rank 0 only)."""
    separator = '=' * 80
    print_rank_0(separator)
    print_rank_0(model)
    for name, param in model.named_parameters():
        print_rank_0('   {} : {} : {} : {}'.format(
            name, param.size(), param.requires_grad, param.dtype))
    print_rank_0(separator)


def setup_seed(seed, deterministic=False):
    """Seed every RNG in use (python, numpy, torch CPU, NPU) and optionally
    enable torch's deterministic algorithms."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch_npu.npu.manual_seed,
        torch_npu.npu.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
    torch.use_deterministic_algorithms(deterministic)
    if deterministic:
        print_rank_0(f"[Info] Deterministic mode enabled. Reproducibility ensured, performance may degrade.")


def is_json_serializable(value):
    """Return True when `value` survives json.dumps without raising."""
    try:
        json.dumps(value)
    except (TypeError, OverflowError):
        return False
    return True


def extract_serializable_args(args):
    """Collect the JSON-serializable parallelism/dtype settings from `args`.

    Accepts a Namespace-like object or a plain dict; keeps only keys whose
    name contains one of the parallel-size / dtype suffixes and whose value
    json can encode.
    """
    args_dict = args if isinstance(args, dict) else vars(args)
    wanted = ('dp_size', 'tp_size', 'ep_size', 'ckpt_dtype')
    result = {}
    for key, value in args_dict.items():
        if any(suffix in key for suffix in wanted) and is_json_serializable(value):
            result[key] = value
    return result


def save_args(args, path):
    """Persist the serializable subset of `args` as pretty-printed JSON,
    creating parent directories as needed."""
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)

    payload = extract_serializable_args(args)
    with target.open('w', encoding='utf-8') as fh:
        json.dump(payload, fh, indent=2, sort_keys=True)

    print_rank_0(f"[Info] Saved serializable args to: {target}")


def load_args(path):
    """Load a saved-args JSON file, validating extension and existence.

    Raises:
        ValueError: for a non-.json extension or unparsable content.
        FileNotFoundError: when the file does not exist.
    """
    json_path = Path(path)
    if json_path.suffix.lower() != '.json':
        raise ValueError(f"Invalid file extension: {json_path}. Expected a .json file.")
    if not json_path.is_file():
        raise FileNotFoundError(f"File not found: {json_path}")

    try:
        with json_path.open('r', encoding='utf-8') as fh:
            return json.load(fh)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON file: {e}")


def args_changed(current_args, saved_args) -> bool:
    """Return True when the current serializable args differ from the saved
    snapshot (as produced by extract_serializable_args)."""
    return extract_serializable_args(current_args) != saved_args


def make_expert_params_mapping(
    ckpt_gate_proj_name: str,
    ckpt_down_proj_name: str,
    ckpt_up_proj_name: str,
    num_experts: int
) -> List[Tuple[str, str, int, str]]:
    """Build (internal_prefix, ckpt_prefix, expert_id, shard_id) tuples for
    every expert weight in the checkpoint.

    Gate (w1) and up (w3) projections map to the fused "experts.w13_"
    runtime parameter; down (w2) maps to "experts.w2_".
    """
    # (shard id, checkpoint weight name, internal fused-parameter prefix)
    shard_specs = [
        ("w1", ckpt_gate_proj_name, "experts.w13_"),
        ("w2", ckpt_down_proj_name, "experts.w2_"),
        ("w3", ckpt_up_proj_name, "experts.w13_"),
    ]
    return [
        (internal, f"experts.{expert_id}.{ckpt_name}.", expert_id, shard)
        for expert_id in range(num_experts)
        for shard, ckpt_name, internal in shard_specs
    ]


def stacked_weight_loader(param: Tensor, loaded_weight: Tensor, name: str, shard_id: str) -> None:
    """Copy a checkpoint tensor into the matching slice of a fused parameter.

    Handles parameters whose checkpoint layout is split but whose runtime
    layout is stacked/fused:
      * gate_up_proj — shared-expert gate and up fused along dim 0, sharded
        over share-expert TP ranks (shard_id is a 0/1-style half index here);
      * kv_cache — per-(k, v) tensors stacked on dim 0, sharded over
        attention TP ranks (shard_id is 'k' or 'v');
      * qkv_proj — q/k/v fused along dim 0, sharded over attention TP ranks
        (shard_id is 'q'/'k'/'v').

    Args:
        param: destination (already sharded) runtime parameter.
        loaded_weight: full tensor read from the checkpoint.
        name: checkpoint parameter name; selects the layout rules.
        shard_id: which sub-tensor this is — interpretation depends on `name`
            (see above).

    Raises:
        ValueError: for an unrecognized name pattern.
    """
    if "gate_up_proj" in name:
        if 'input_offset' in name or 'input_scale' in name:
            # Per-tensor quant params: replicate across the fused output dim.
            new_weight = loaded_weight.repeat(param.size(0))
            param.data.copy_(new_weight)
        elif 'deq_scale' in name:
            # npu quant ops expect int64-packed dequant scales; convert if needed.
            if loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())
            shard_size = loaded_weight.size(0) // ARGS.share_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.share_expert_tp_rank*shard_size, shard_size).contiguous()
            # shard_id selects the gate (0) or up (1) half of the fused dim.
            half_size = param.size(0) // 2
            param[shard_id*half_size:(shard_id+1)*half_size].data.copy_(new_weight)
        else:
            # share expert
            # Weight tensor: TP-shard along dim 0, then place into the
            # gate/up half selected by shard_id.
            shard_size = loaded_weight.size(0) // ARGS.share_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.share_expert_tp_rank*shard_size, shard_size).contiguous()
            half_size = param.size(0) // 2
            param[shard_id*half_size:(shard_id+1)*half_size].data.copy_(new_weight)

    elif 'kv_cache' in name:
        # Stacked (k, v) layout: index 0 holds k, index 1 holds v.
        range_dict = {'k': 0, 'v': 1}
        shard_size = loaded_weight.size(0) // ARGS.atten_tp_size
        new_weight = loaded_weight.narrow(0, ARGS.atten_tp_rank*shard_size, shard_size).contiguous()
        param[range_dict[shard_id]].data.copy_(new_weight)

    elif "qkv_proj" in name:
        if any(key in name for key in ["bias", "weight", "deq_scale"]):
            if 'deq_scale' in name and loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())

            shard_size = loaded_weight.size(0) // ARGS.atten_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.atten_tp_rank*shard_size, shard_size).contiguous()

            # Derive q/k/v extents from the fused param size; the arithmetic
            # assumes k and v shards have equal size.
            if 'q' == shard_id:
                q_size = new_weight.size(0)
                kv_size = (param.size(0) - q_size) // 2
            else:
                kv_size = new_weight.size(0)
                q_size = param.size(0) - kv_size * 2

            # [start, end) slices of the fused q|k|v dim for each shard.
            range_dict = {
                'q': [0, q_size],
                'k': [q_size, q_size+kv_size],
                'v': [q_size+kv_size, q_size+2*kv_size],
            }

            param[range_dict[shard_id][0]:range_dict[shard_id][1]].data.copy_(new_weight)

        elif 'input_offset' in name or 'input_scale' in name:
            # Per-tensor quant params: replicate across the fused output dim.
            new_weight = loaded_weight.repeat(param.size(0))
            param.data.copy_(new_weight)
        else:
            raise ValueError(f"Unhandled qkv_proj name pattern: {name}")
    else:
        raise ValueError(f"Unsupported parameter type in name: {name}")


def expert_weight_loader(param: Tensor, loaded_weight: Tensor, name: str, shard_id: str, expert_id: int) -> None:
    """Load one routed-expert checkpoint tensor into the fused expert parameter.

    Runtime layout: each rank holds a param whose leading dim is the local
    expert count (num_experts // ep_size); gate (w1) and up (w3) are fused
    into a single "w13" tensor with w1 in the first half of the fused dim
    and w3 in the second half. A rank copies `loaded_weight` only when
    `expert_id` falls in its EP shard; TP sharding is applied along the
    weight's matmul dimension.

    Args:
        param: destination fused parameter (leading dim = local expert count).
        loaded_weight: checkpoint tensor for one expert.
        name: checkpoint name; selects weight vs. quantization-param handling.
        shard_id: 'w1' (gate), 'w2' (down) or 'w3' (up).
        expert_id: global index of the expert this tensor belongs to.

    Raises:
        ValueError: on an unknown shard_id or an unexpected expert count
            (the model is hard-coded to 64 routed experts).
    """
    # routed expert
    if shard_id == 'w2':
        if 'w2_deq_scale' in name or 'w2_quant_bias' in name:
            # npu quant ops expect int64-packed dequant scales.
            if 'w2_deq_scale' in name and loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())
            local_num_experts = param.size(0)
            # Copy only when this rank's EP shard owns the expert.
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(loaded_weight)
        elif 'w2_weight_offset' in name or 'w2_weight_scale' in name:
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(loaded_weight.squeeze(-1))
        elif 'w2_input_offset' in name or 'w2_input_scale' in name:
            # Per-tensor quant params replicated across dim 1 of the param.
            new_weight = loaded_weight.repeat(param.size(1))
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(new_weight)
        else:
            # param, size: 64//EP, 1344 // TP, 5120
            # loaded_weight, size: 5120, 1344
            if param.size(0) * ARGS.routed_expert_ep_size != 64:
                    raise ValueError("The expert's parameter shape does not match 64 divided by ep_size.")
            # Transpose, then TP-shard the reduction dim.
            shard_size = loaded_weight.size(1) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.t().narrow(0, ARGS.routed_expert_tp_rank*shard_size, shard_size).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(new_weight)

    elif shard_id == 'w1':
        # Gate projection: lands in the FIRST half of the fused w13 dim.
        if 'w13_deq_scale' in name or 'w13_quant_bias' in name:
            if 'deq_scale' in name and loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.routed_expert_tp_rank*shard_size, shard_size).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(-1) // 2
                param[shard_ep_id, :half_size].data.copy_(new_weight)

        elif 'w13_weight_offset' in name or 'w13_weight_scale' in name:
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.routed_expert_tp_rank*shard_size, shard_size).squeeze(-1).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(-1) // 2
                param[shard_ep_id, :half_size].data.copy_(new_weight)
        elif 'w13_input_offset' in name or 'w13_input_scale' in name:
            new_weight = loaded_weight.repeat(param.size(1))
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(new_weight)
        else:
            if param.size(0) * ARGS.routed_expert_ep_size != 64:
                    raise ValueError("The expert's parameter shape does not match 64 divided by ep_size.")
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.t().narrow(1, ARGS.routed_expert_tp_rank*shard_size, shard_size).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(-1) // 2
                param[shard_ep_id, :, :half_size].data.copy_(new_weight)

    elif shard_id == 'w3':
        # Up projection: lands in the SECOND half of the fused w13 dim.
        if 'w13_deq_scale' in name or 'w13_quant_bias' in name:
            if 'deq_scale' in name and loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.routed_expert_tp_rank*shard_size, shard_size).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(1) // 2
                param[shard_ep_id, half_size:].data.copy_(new_weight)
        elif 'w13_weight_offset' in name or 'w13_weight_scale' in name:
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.narrow(0, ARGS.routed_expert_tp_rank*shard_size, shard_size).squeeze(-1).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(1) // 2
                param[shard_ep_id, half_size:].data.copy_(new_weight)
        elif 'w13_input_offset' in name or 'w13_input_scale' in name:
            new_weight = loaded_weight.repeat(param.size(1))
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                param[shard_ep_id].data.copy_(new_weight)
        else:
            if param.size(0) * ARGS.routed_expert_ep_size != 64:
                    raise ValueError("The expert's parameter shape does not match 64 divided by ep_size.")
            shard_size = loaded_weight.size(0) // ARGS.routed_expert_tp_size
            new_weight = loaded_weight.t().narrow(1, ARGS.routed_expert_tp_rank*shard_size, shard_size).contiguous()
            local_num_experts = param.size(0)
            if ARGS.routed_expert_ep_rank == expert_id // local_num_experts:
                shard_ep_id = expert_id % local_num_experts
                half_size = param.size(-1) // 2
                param[shard_ep_id, :, half_size:].data.copy_(new_weight)

    else:
        raise ValueError(f"Unknown shard_id: {shard_id}")


def others_weight_loader(param: Tensor, loaded_weight: Tensor, name: str) -> None:
    """Load a non-expert checkpoint tensor into *param*, sharding as required.

    Dispatches on substrings of *name*:
      - router scales: drop the leading batch dim and copy;
      - ``o_proj.bias``: copy verbatim;
      - ``embed_tokens`` / ``lm_head``: row-shard across the respective
        parallel group before copying;
      - ``down_proj`` / ``o_proj``: quant scales/offsets are copied (or
        repeated to match *param*), plain weights are column-sharded;
      - anything else: copy verbatim.
    """
    if "router_scale" in name:
        param.data.copy_(loaded_weight.squeeze(0))
        return

    if "o_proj.bias" in name:
        param.data.copy_(loaded_weight)
        return

    if "embed_tokens" in name:
        rows = loaded_weight.size(0) // ARGS.atten_tp_size
        shard = loaded_weight.narrow(0, ARGS.atten_tp_rank * rows, rows).contiguous()
        param.data.copy_(shard)
        return

    if "lm_head" in name:
        rows = loaded_weight.size(0) // ARGS.world_size
        shard = loaded_weight.narrow(0, ARGS.share_expert_tp_rank * rows, rows).contiguous()
        param.data.copy_(shard)
        return

    if "down_proj" in name or "o_proj" in name:
        quant_keys = ("deq_scale", "quant_bias", "weight_offset", "weight_scale")
        if any(key in name for key in quant_keys):
            # deq_scale arriving as a non-int64 tensor must be converted on device.
            if 'deq_scale' in name and loaded_weight.dtype != torch.int64:
                loaded_weight = torch_npu.npu_trans_quant_param(loaded_weight.to(torch.float32).npu())
            param.data.copy_(loaded_weight)
        elif "input_offset" in name or "input_scale" in name:
            # Broadcast the scalar scale/offset to every output row of param.
            param.data.copy_(loaded_weight.repeat(param.size(0)))
        else:
            # Plain weight: column-shard over the matching TP group.
            if "down_proj" in name:
                tp_size, tp_rank = ARGS.share_expert_tp_size, ARGS.share_expert_tp_rank
            else:  # o_proj
                tp_size, tp_rank = ARGS.atten_tp_size, ARGS.atten_tp_rank
            cols = loaded_weight.size(1) // tp_size
            param.data.copy_(loaded_weight.narrow(1, tp_rank * cols, cols).contiguous())
        return

    param.data.copy_(loaded_weight)


def use_internal_format_weight(module, prefix=None):
    """Recursively rewrite *module*'s weights into the NPU-internal layouts.

    Behavior branches on ``ARGS.hardware`` ("300I" vs "800I") and
    ``ARGS.compile``: linear / grouped-matmul weights may be cast to the
    "nz" internal format (format id 29 in ``npu_format_cast``), quant
    scales are normalized to float32, and static (non-dynamic) input
    scales are reduced to a single value (mean on 300I, max over dim 0 on
    800I).  Intended to run once after checkpoint loading.

    Args:
        module: root torch module to convert in place.
        prefix: dotted module path accumulated during the recursive
            descent (not used for any decision in this function).
    """
    # Project-local imports are kept inside the function, as in the rest of
    # the file; NOTE(review): presumably to avoid an import cycle — confirm.
    from layers import Linear, RowParallelLinear
    from model import PanguFusedMoeBlock, PanguSparseMoeBlock, PanguAttention

    if ARGS.hardware == "300I":
        if ARGS.model_dtype == torch.int8:
            if ARGS.compile:
                # 300I w8a8 compile: all linear weights need to be cast into nz format; GMM weights need to be transposed and cast into nz format.
                if isinstance(module, Linear):
                    # 29 selects the nz internal layout for npu_format_cast.
                    module.weight.data = torch_npu.npu_format_cast(module.weight.data, 29).contiguous()
                    if module.quant_enable:
                        module.weight_scale = module.weight_scale.to(torch.float32)
                        # special linear layers like lm_head don't need this trans
                        if not module.dynamic_enable:
                            # Static quant: collapse input_scale to its mean as a 1-element fp32 tensor.
                            module.input_scale.data = torch.tensor([module.input_scale.mean()], dtype=torch.float32, device=module.input_scale.device)
                            if module.deq_scale.data.dtype != torch.int64:
                                module.deq_scale.data = torch_npu.npu_trans_quant_param(module.deq_scale.data)

                if isinstance(module, PanguFusedMoeBlock):
                    # GMM expert weights: transpose the last two dims, then cast to nz.
                    module.w13_weight.data = torch_npu.npu_format_cast(module.w13_weight.data.transpose(1,2).contiguous(), 29)
                    module.w2_weight.data = torch_npu.npu_format_cast(module.w2_weight.data.transpose(1,2).contiguous(), 29)
                    if module.quant_enable:
                        module.w2_weight_scale.data = module.w2_weight_scale.data.to(torch.float32)
                        module.w13_weight_scale.data = module.w13_weight_scale.data.to(torch.float32)
                        if not module.dynamic_enable:
                            module.w13_input_scale.data = torch.tensor([module.w13_input_scale.data.mean()], dtype=torch.float32, device=module.w13_input_scale.device)
                            module.w2_input_scale.data = torch.tensor([module.w2_input_scale.data.mean()], dtype=torch.float32, device=module.w2_input_scale.device)

                if isinstance(module, PanguSparseMoeBlock):
                    module.gate.weight.data = torch_npu.npu_format_cast(module.gate.weight.data, 29).contiguous()

            else:
                # 300I w8a8 eager: no nz cast, only dtype/scale normalization
                # (GMM weights are still transposed below).
                if isinstance(module, Linear) and module.quant_enable:
                    # special linear layers like lm_head don't need this trans
                    module.weight_scale = module.weight_scale.to(torch.float32)
                    if not module.dynamic_enable:
                        module.input_scale.data = torch.tensor([module.input_scale.mean()], dtype=torch.float32, device=module.input_scale.device)


                if isinstance(module, PanguFusedMoeBlock):
                    module.w13_weight.data = module.w13_weight.data.transpose(1,2).contiguous()
                    module.w2_weight.data = module.w2_weight.data.transpose(1,2).contiguous()
                    if module.quant_enable:
                        module.w2_weight_scale.data = module.w2_weight_scale.data.to(torch.float32)
                        module.w13_weight_scale.data = module.w13_weight_scale.data.to(torch.float32)
                        if not module.dynamic_enable:
                            module.w13_input_scale.data = torch.tensor([module.w13_input_scale.data.mean()], dtype=torch.float32, device=module.w13_input_scale.device)
                            module.w2_input_scale.data = torch.tensor([module.w2_input_scale.data.mean()], dtype=torch.float32, device=module.w2_input_scale.device)

    elif ARGS.hardware == "800I":
        if ARGS.compile:
            torch.npu.config.allow_internal_format = True
            if isinstance(module, Linear):
                if module.weight.dtype == torch.int8:
                    # for pertoken
                    # special linear layers like lm_head don't need this trans
                    module.weight_scale.data = module.weight_scale.data.view(-1).to(torch.float32)
                    module.weight_offset.data = module.weight_offset.data.view(-1).to(torch.float32)
                    if not module.dynamic_enable:
                        if module.deq_scale.data.dtype != torch.int64:
                            module.deq_scale.data = torch_npu.npu_trans_quant_param(module.deq_scale.data)
                        # for layers.quant_per_tensor
                        module.input_scale.data = module.input_scale.data.to(torch.float32)

            if isinstance(module, PanguFusedMoeBlock):
                if module.w13_weight.dtype == torch.int8:
                    module.w13_weight.data = torch_npu.npu_format_cast(module.w13_weight.data, 29)
                    module.w2_weight.data = torch_npu.npu_format_cast(module.w2_weight.data, 29)
                    if not module.dynamic_enable:
                        # Static quant: reduce input scales with max over dim 0.
                        module.w13_input_scale.data = module.w13_input_scale.data.max(0)[0].to(torch.float32)
                        module.w2_input_scale.data = module.w2_input_scale.data.max(0)[0].to(torch.float32)

        else:
            if isinstance(module, Linear):
                if module.weight.dtype == torch.int8:
                    # for pertoken
                    module.weight_scale.data = module.weight_scale.data.view(-1).to(torch.float32)
                    module.weight_offset.data = module.weight_offset.data.view(-1).to(torch.float32)
                    if not module.dynamic_enable:
                        module.input_scale.data = module.input_scale.data.to(torch.float32)

            # 800I eager: MoE weights keep their layout; only static input scales are reduced.
            if isinstance(module, PanguFusedMoeBlock):
                if module.w13_weight.dtype == torch.int8 and not module.dynamic_enable:
                    module.w13_input_scale.data = module.w13_input_scale.data.max(0)[0].to(torch.float32)
                    module.w2_input_scale.data = module.w2_input_scale.data.max(0)[0].to(torch.float32)

    # all hardware
    # NOTE(review): bias is pre-scaled by 1/atten_tp_size under H2P — presumably
    # so a subsequent TP reduction sums it back to the full bias; confirm.
    if isinstance(module, RowParallelLinear) and module.bias is not None and ARGS.H2P:
        module.bias.data = module.bias.data / ARGS.atten_tp_size

    if isinstance(module, PanguSparseMoeBlock):
        # Keep only this EP rank's slice of the router scales (dim 0 split
        # evenly across routed_expert_ep_size ranks).
        local_num_experts = module.router_scale.size(0) // ARGS.routed_expert_ep_size
        module.router_scale.data = module.router_scale.data.narrow(0, ARGS.routed_expert_ep_rank * local_num_experts, local_num_experts).to(ARGS.float_dtype).contiguous()

    # Quant KV cache split
    if isinstance(module, PanguAttention):
        if module.quant_cache:
            module.k_cache_scale = module.kv_cache_scale[0].to(torch.float32) # fp32
            module.v_cache_scale = module.kv_cache_scale[1].to(torch.float32)
            module.k_cache_offset = module.kv_cache_offset[0].to(torch.float32) # fp32
            module.v_cache_offset = module.kv_cache_offset[1].to(torch.float32)

    # Recurse into every child module, extending the dotted path prefix.
    for child_name, child_module in module.named_children():
        full_prefix = f"{prefix}.{child_name}" if prefix else child_name
        use_internal_format_weight(child_module, full_prefix)


def load_ckpt_and_quant_scale(model, tokenizer) -> None:
    """Load model weights, then any KV / communication quant scaling factors.

    When ``ARGS.fast_ckpt_path`` is set, a per-rank ``<rank>.pt`` cache is
    used: it is loaded if present and the saved args still match, otherwise
    the full checkpoint is loaded and the cache (plus args and tokenizer)
    is written for next time.
    """
    if ARGS.ckpt_path is None:
        print_rank_0("ARGS.ckpt_path is None. Model will use default initialization.")
        return

    if not ARGS.fast_ckpt_path:
        model.load_weights(ARGS.ckpt_path)
        print_rank_0("[Checkpoint] Loaded checkpoint from ARGS.ckpt_path.")
    else:
        cache_dir = Path(ARGS.fast_ckpt_path)
        cache_dir.mkdir(parents=True, exist_ok=True)

        ckpt_file = cache_dir / f"{ARGS.rank}.pt"
        args_file = cache_dir / "args.json"

        # The cache is only valid if both files exist and the run args
        # match the ones the cache was built with.
        cache_usable = (
            ckpt_file.is_file()
            and args_file.is_file()
            and not args_changed(ARGS, load_args(args_file))
        )

        if cache_usable:
            state = torch.load(ckpt_file, map_location='cpu', weights_only=True)
            model.load_state_dict(state, strict=False)
            print_rank_0("[Fast Checkpoint] Loaded checkpoint from the cache.")
        else:
            model.load_weights(ARGS.ckpt_path)
            torch.save(model.state_dict(), ckpt_file)
            save_args(ARGS, args_file)
            tokenizer.save_pretrained(str(cache_dir))
            print_rank_0("[Fast Checkpoint] Saved checkpoint in a new cache.")

    # Quantization scaling factors are applied after the weights are in place.
    from quant_tool import (
        load_comm_scaling_factor,
        load_kv_scaling_factor,
    )

    if ARGS.kv_scale_path is not None and not ARGS.kv_quant_calibrate:
        print_rank_0("using kv quant for inference")
        load_kv_scaling_factor(model, ARGS.kv_scale_path)
    if ARGS.comm_scale_path is not None and not ARGS.comm_quant_calibrate:
        print_rank_0("using comm quant for inference")
        load_comm_scaling_factor(model, ARGS.comm_scale_path)


def update_model_args_from_config(model_args, model_path: str):
    """Overwrite dataclass fields of *model_args* from ``<model_path>/config.json``.

    Also loads the quant config (via ``update_quant_args_from_config``).
    Only keys that are declared fields of *model_args* are applied; if the
    config file is missing, *model_args* is returned unchanged.
    """
    update_quant_args_from_config(model_args, model_path)
    config_file = os.path.join(model_path, "config.json")

    if not os.path.exists(config_file):
        print_rank_0(f"Config file not found at: {config_file}")
        print_rank_0("Using default model_args.")
        return model_args

    with open(config_file, "r", encoding="utf-8") as f:
        loaded = json.load(f)

    # Only accept keys that correspond to declared dataclass fields.
    known = {field.name for field in fields(model_args)}
    for key in loaded:
        if key in known:
            setattr(model_args, key, loaded[key])

    print_rank_0(f"Loaded model_args from: {config_file}")
    print_rank_0(model_args)

    return model_args


def update_quant_args_from_config(model_args, model_path: str):
    """Attach the quantization description JSON to ``model_args.quant_config``.

    Looks for a ``quant_model_description*.json`` file in *model_path*.
    Skipped entirely for bf16/fp16 checkpoints (no quant config needed).

    Args:
        model_args: model argument object; ``quant_config`` is set on it.
        model_path: directory that may contain the quant description file.

    Returns:
        *model_args* (always — previously the bf16/fp16 early-return path
        returned ``None`` while every other path returned *model_args*).

    Raises:
        ValueError: if no matching quant description file is found.
    """
    import glob
    if ARGS.ckpt_dtype in ["bfloat16", "float16"]:
        print_rank_0("No need to load quant config for bf16/fp16 model")
        # Fix: return model_args for a consistent return type on every path.
        return model_args

    search_pattern = os.path.join(model_path, "quant_model_description*.json")
    # Fix: glob returns files in arbitrary, OS-dependent order; sort so that
    # picking matching_files[0] is deterministic across runs and machines.
    matching_files = sorted(glob.glob(search_pattern))

    if not matching_files:
        raise ValueError(f"No quant config file matching 'quant_model_description*.json' found in: {model_path}")

    quant_config_file = matching_files[0]

    with open(quant_config_file, "r", encoding="utf-8") as f:
        quant_config = json.load(f)
    model_args.quant_config = quant_config

    print_rank_0(f"Loaded model_args.quant_config from: {quant_config_file}")
    return model_args

