import gc
import json
import logging
import os
import re
import sys
from collections import defaultdict

import safetensors.torch
import torch
from safetensors import safe_open
from safetensors.torch import load_file

from megatron.training.checkpointing import get_checkpoint_name, get_checkpoint_tracker_filename, read_metadata

from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
)
import math
import argparse
import numpy as np
from collections.abc import Mapping, Sequence
# from model_mapping import get_megatron_config, get_megatron_model, save_hfmodel, clone_state_dict


from safetensors.torch import save_file

from transformers.modeling_utils import (
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
)

try:
    from transformers.modeling_utils import shard_checkpoint
    USE_TRANSFORMERS_SAVE = True
except:
    from huggingface_hub.constants import (
        SAFETENSORS_WEIGHTS_FILE_PATTERN,
        SAFETENSORS_INDEX_FILE,
    )
    from huggingface_hub import split_torch_state_dict_into_shards
    USE_TRANSFORMERS_SAVE = False


torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cuda.enable_mem_efficient_sdp(False)
torch.backends.cuda.enable_flash_sdp(False)


def hf2megatron_base_config(hf_config):
    """Translate a HuggingFace config into the common Megatron config dict.

    Args:
        hf_config: a HF PretrainedConfig-like object.

    Returns:
        dict with the base Megatron model hyper-parameters. ``head_dim``
        falls back to ``hidden_size // num_attention_heads`` when the HF
        config does not define ``kv_channels``.
    """
    head_dim = getattr(
        hf_config,
        "kv_channels",
        hf_config.hidden_size // hf_config.num_attention_heads,
    )
    megatron_config = {
        "hidden_size": hf_config.hidden_size,
        "num_layers": hf_config.num_hidden_layers,
        "num_attention_heads": hf_config.num_attention_heads,
        "vocab_size": hf_config.vocab_size,
        "num_query_groups": hf_config.num_key_value_heads,
        "ffn_hidden_size": hf_config.intermediate_size,
        "head_dim": head_dim,
        "model_type": hf_config.model_type,
        # BUGFIX: only the Qwen2 family carries a QKV bias. The previous
        # check ("qwen" in model_type) also matched qwen3/qwen3_moe, whose
        # checkpoints have no bias, and hf2megatron_base_model only extracts
        # the bias when "qwen2" is in the model type.
        "add_qkv_bias": "qwen2" in hf_config.model_type,
    }
    return megatron_config


def hf2deepseek_v3_config(hf_config):
    """Build a Megatron config dict for a DeepSeek-V3 HF config.

    Starts from the shared base config and layers on the MLA / MoE / MTP
    specific hyper-parameters.
    """
    cfg = hf2megatron_base_config(hf_config)
    cfg.update({
        "moe_ffn_hidden_size": hf_config.moe_intermediate_size,
        "moe_router_topk": hf_config.num_experts_per_tok,
        # NOTE(review): shared-expert width is taken as a single expert's
        # intermediate size; if n_shared_experts > 1 this may need scaling.
        "moe_shared_expert_intermediate_size": hf_config.moe_intermediate_size,
        "kv_lora_rank": hf_config.kv_lora_rank,
        "q_lora_rank": hf_config.q_lora_rank,
        "num_experts": hf_config.n_routed_experts,
        # One multi-token-prediction module is converted.
        "num_mtp_predictor": 1,
        "v_head_dim": hf_config.v_head_dim,
        "qk_head_dim": hf_config.qk_nope_head_dim,
        "qk_pos_emb_head_dim": hf_config.qk_rope_head_dim,
        "model_type": "deepseek_v3",
    })
    return cfg

def hf2qwen3moe_config(hf_config):
    """Build a Megatron config dict for a Qwen3-MoE HF config.

    Extends the dense base config with the MoE-specific fields.
    """
    megatron_config = hf2megatron_base_config(hf_config)
    megatron_config["moe_ffn_hidden_size"] = hf_config.moe_intermediate_size
    # BUGFIX: a stray trailing comma previously turned this value into a
    # 1-tuple (e.g. (8,) instead of 8).
    megatron_config["moe_router_topk"] = hf_config.num_experts_per_tok
    megatron_config['num_experts'] = hf_config.num_experts
    megatron_config['model_type'] = 'qwen3_moe'
    return megatron_config


def get_megatron_config(hf_config):
    """Return the Megatron config dict for the given HF config.

    Model types without a dedicated converter (including qwen2/llama) fall
    back to the shared base converter.
    """
    converters = {
        "qwen3_moe": hf2qwen3moe_config,
        "deepseek_v3": hf2deepseek_v3_config,
    }
    converter = converters.get(hf_config.model_type, hf2megatron_base_config)
    return converter(hf_config)

def hf2megatron_base_model(hf_config, hf_model):
    """Map a dense HF state dict onto Megatron parameter names.

    Args:
        hf_config: HF config of the source model.
        hf_model: flat state dict (parameter name -> tensor), not a module.

    Returns:
        dict: Megatron-style state dict. Q/K/V projections are interleaved
        per KV group into one fused ``linear_qkv`` weight; gate/up
        projections are stacked into ``linear_fc1``.
    """
    weights = hf_model
    cfg = get_megatron_config(hf_config)
    head_dim = cfg['head_dim']
    groups = cfg['num_query_groups']
    hidden = cfg['hidden_size']

    converted = {'embedding.word_embeddings.weight': weights['model.embed_tokens.weight']}

    for idx in range(hf_config.num_hidden_layers):
        mg_prefix = f'decoder.layers.{idx}.'
        hf_prefix = f'model.layers.{idx}.'

        # View each projection as (group, heads_per_group, head_dim, hidden)
        # so Q/K/V can be interleaved group-by-group.
        def grouped(name):
            return weights[hf_prefix + name].view(groups, -1, head_dim, hidden)

        qkv = torch.cat(
            [grouped('self_attn.q_proj.weight'),
             grouped('self_attn.k_proj.weight'),
             grouped('self_attn.v_proj.weight')],
            dim=1,
        ).view(-1, hidden).contiguous()
        converted[mg_prefix + 'self_attention.linear_qkv.weight'] = qkv
        converted[mg_prefix + 'self_attention.linear_qkv.layer_norm_weight'] = \
            weights[hf_prefix + 'input_layernorm.weight']

        # Only the Qwen2 family carries a QKV bias.
        if "qwen2" in hf_config.model_type:
            bias_parts = [
                weights[hf_prefix + f'self_attn.{p}_proj.bias'].view(groups, -1)
                for p in ('q', 'k', 'v')
            ]
            converted[mg_prefix + 'self_attention.linear_qkv.bias'] = \
                torch.cat(bias_parts, dim=1).view(-1).contiguous()

        converted[mg_prefix + 'self_attention.linear_proj.weight'] = \
            weights[hf_prefix + 'self_attn.o_proj.weight']

        # MoE models handle their MLP weights elsewhere; dense models get the
        # fused gate/up projection here.
        if "qwen3_moe" not in hf_config.model_type:
            converted[mg_prefix + 'mlp.linear_fc2.weight'] = \
                weights[hf_prefix + 'mlp.down_proj.weight']
            converted[mg_prefix + 'mlp.linear_fc1.layer_norm_weight'] = \
                weights[hf_prefix + 'post_attention_layernorm.weight']
            converted[mg_prefix + 'mlp.linear_fc1.weight'] = torch.cat(
                [weights[hf_prefix + 'mlp.gate_proj.weight'],
                 weights[hf_prefix + 'mlp.up_proj.weight']])

    converted['decoder.final_layernorm.weight'] = weights['model.norm.weight']
    converted['output_layer.weight'] = weights['lm_head.weight']
    return converted

def hf2megatron_qwen3_moe_model(hf_config, hf_model):
    """Map a Qwen3-MoE HF state dict onto Megatron parameter names.

    Reuses the dense conversion for embeddings/attention/output, then adds
    the q/k norms, the router, and per-expert fc1/fc2 weights (stored as
    ``mlp.experts.linear_fc{1,2}.weight<expert_id>``).

    Args:
        hf_config: HF config of the source model.
        hf_model: flat state dict (parameter name -> tensor).

    Returns:
        dict: Megatron-style state dict.
    """
    mg_config = get_megatron_config(hf_config)
    mg_state_dict = hf2megatron_base_model(hf_config, hf_model)
    # (removed unused locals head_dim / hidden_size / num_query_groups)
    for layer_id in range(mg_config['num_layers']):
        mg_base_name = f'decoder.layers.{layer_id}.'
        hf_base_name = f'model.layers.{layer_id}.'
        # Qwen3 applies RMSNorm to the query/key projections.
        mg_state_dict[mg_base_name + 'self_attention.q_layernorm.weight'] = hf_model[hf_base_name + 'self_attn.q_norm.weight']
        mg_state_dict[mg_base_name + 'self_attention.k_layernorm.weight'] = hf_model[hf_base_name + 'self_attn.k_norm.weight']
        mg_state_dict[mg_base_name + 'mlp.router.weight'] = hf_model[hf_base_name + 'mlp.gate.weight']
        for expert_id in range(mg_config['num_experts']):
            # Stack gate_proj on top of up_proj into a fused gated fc1; the
            # leading unit dim lets per-expert weights be concatenated later.
            fc1_weight = torch.cat(
                [hf_model[hf_base_name + f'mlp.experts.{expert_id}.gate_proj.weight'],
                 hf_model[hf_base_name + f'mlp.experts.{expert_id}.up_proj.weight']]).unsqueeze(0)
            fc2_weight = hf_model[hf_base_name + f'mlp.experts.{expert_id}.down_proj.weight'].unsqueeze(0)
            mg_state_dict[mg_base_name + f'mlp.experts.linear_fc1.weight{expert_id}'] = fc1_weight
            mg_state_dict[mg_base_name + f'mlp.experts.linear_fc2.weight{expert_id}'] = fc2_weight
        mg_state_dict[mg_base_name + 'pre_mlp_layernorm.weight'] = hf_model[hf_base_name + 'post_attention_layernorm.weight']
    return mg_state_dict

def hf2megatron_deepseek_model(hf_config, hf_model, hf_model_path):
    """Map a DeepSeek-V3 HF state dict onto Megatron (MLA + MoE) names.

    Args:
        hf_config: HF config of a deepseek_v3 model.
        hf_model: flat state dict (parameter name -> tensor) of the main model.
        hf_model_path: HF checkpoint directory; required because the MTP
            (multi-token prediction) weights are stored as an extra layer at
            index ``num_hidden_layers`` and are read here directly from the
            safetensors shards via the index file.

    Returns:
        dict: Megatron-style state dict.

    Raises:
        FileNotFoundError: if MTP conversion is requested but
            'model.safetensors.index.json' is missing.
    """
    mg_state_dict = {}
    #state_dict = hf_model.state_dict()
    state_dict = hf_model
    mg_config = get_megatron_config(hf_config)
    # NOTE(review): head_dim / num_query_groups / hidden_size are not used
    # below (MLA weights are copied whole, not reshaped per head).
    head_dim = mg_config['head_dim']
    num_query_groups = mg_config['num_query_groups']
    hidden_size = mg_config['hidden_size']
    if mg_config["num_mtp_predictor"] > 0:
        # HF stores the MTP block as one extra decoder layer right after the
        # regular layers, i.e. at index == num_layers.
        mtp_layer_idx = mg_config["num_layers"]
        # collect weights of model.layers.{mtp_layer_idx} from index.json
        mtp_dict = {}
        index_file = os.path.join(hf_model_path, 'model.safetensors.index.json')
        if not os.path.exists(index_file):
            raise FileNotFoundError("'model.safetensors.index.json' not exists, cannot convert MTP module..")
        with open(index_file, 'r') as f:
            index_data = json.load(f)["weight_map"]
        # Restrict to the shards that actually contain MTP-layer tensors.
        mtp_map = {k:v for k, v in index_data.items() if f'model.layers.{mtp_layer_idx}' in k}
        files = set(mtp_map.values())
        for file in files:
            with safe_open(os.path.join(hf_model_path, file), framework="pt") as f:
                mtp_dict.update({k: f.get_tensor(k) for k in f.keys() if k in mtp_map})
        
        # NOTE: no-need to copy shared embedding
        # mgmodel.embedding.word_embeddings.weight.copy_(mtp_dict[f"model.layers.{mtp_layer_idx}.embed_tokens.weight"])
        # e/h norm and eh_proj
        mg_state_dict['mtp_predictor.mtp_modules.0.norm1.weight'] = mtp_dict[f'model.layers.{mtp_layer_idx}.enorm.weight']
        mg_state_dict['mtp_predictor.mtp_modules.0.norm2.weight'] = mtp_dict[f'model.layers.{mtp_layer_idx}.hnorm.weight']
        mg_state_dict['mtp_predictor.mtp_modules.0.linear_proj.weight'] = mtp_dict[f'model.layers.{mtp_layer_idx}.eh_proj.weight']
        # attention and mlp: MLA low-rank q/kv projections plus the MoE block
        # of the single MTP decoder layer.
        mtplayer_base_name = "mtp_predictor.mtp_modules.0.decoder.layers.0."
        mg_state_dict[mtplayer_base_name + "input_layernorm.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.input_layernorm.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_q_up_proj.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_b_proj.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_q_up_proj.layer_norm_weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_a_layernorm.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_q_down_proj.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_a_proj.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_kv_down_proj.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_a_proj_with_mqa.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_kv_up_proj.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_b_proj.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_kv_up_proj.layer_norm_weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_a_layernorm.weight"]
        mg_state_dict[mtplayer_base_name + "self_attention.linear_proj.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.o_proj.weight"]
        mg_state_dict[mtplayer_base_name + "pre_mlp_layernorm.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.post_attention_layernorm.weight"]
        mg_state_dict[mtplayer_base_name + "mlp.router.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.gate.weight"]
        mg_state_dict[mtplayer_base_name + "mlp.router.expert_bias"] = mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.gate.e_score_correction_bias"]
        # Shared expert: fuse gate_proj and up_proj into one fc1 weight.
        shared_fc1_weight = torch.cat(
            [mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.gate_proj.weight"],
                mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.up_proj.weight"]])
        
        mg_state_dict[mtplayer_base_name + "mlp.shared_experts.linear_fc1.weight"] = shared_fc1_weight
        mg_state_dict[mtplayer_base_name + "mlp.shared_experts.linear_fc2.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.down_proj.weight"]
        mg_state_dict["mtp_predictor.mtp_modules.0.decoder.final_layernorm.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.shared_head.norm.weight"]
        mg_state_dict["mtp_embedding.word_embeddings.weight"] = mtp_dict[f"model.layers.{mtp_layer_idx}.embed_tokens.weight"]
        fc1_weight_list = []
        fc2_weight_list = []
        # Routed experts: one fused fc1 (gate+up) and one fc2 per expert,
        # each with a leading unit dim for later concatenation/sharding.
        for i in range(mg_config["num_experts"]):
            fc1_weight = torch.cat(
                [mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".gate_proj.weight"],
                 mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".up_proj.weight"]]).unsqueeze(0)
            fc2_weight = mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".down_proj.weight"].unsqueeze(0)
            mg_state_dict[mtplayer_base_name + f"mlp.experts.linear_fc1.weight{i}"] = fc1_weight
            mg_state_dict[mtplayer_base_name + f"mlp.experts.linear_fc2.weight{i}"] = fc2_weight

            #fc1_weight_list.append(fc1_weight)
            #fc2_weight_list.append(mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".down_proj.weight"].unsqueeze(0))
        #fc1_weight = torch.cat(fc1_weight_list).permute(0,2,1)
        #fc2_weight = torch.cat(fc2_weight_list).permute(0,2,1)
        #mg_state_dict[mtplayer_base_name + "mlp.experts.weight1"] = fc1_weight
        #mg_state_dict[mtplayer_base_name + "mlp.experts.weight2"] = fc2_weight

    for layer_id in range(mg_config["num_layers"]):
        mg_base_name = f"decoder.layers.{layer_id}."
        hf_base_name = f"model.layers.{layer_id}."
        mg_state_dict[mg_base_name + "input_layernorm.weight"] = state_dict[hf_base_name + "input_layernorm.weight"]
        # Without q LoRA the query is a single projection; with it, the query
        # is split into down-proj -> layernorm -> up-proj.
        if mg_config["q_lora_rank"] is None:
            mg_state_dict[mg_base_name + "self_attention.linear_q_proj.weight"] = state_dict[hf_base_name + "self_attn.q_proj.weight"]
        else:
            mg_state_dict[mg_base_name + "self_attention.linear_q_down_proj.weight"] = state_dict[hf_base_name + "self_attn.q_a_proj.weight"]
            mg_state_dict[mg_base_name + "self_attention.linear_q_up_proj.weight"] = state_dict[hf_base_name + "self_attn.q_b_proj.weight"]
            mg_state_dict[mg_base_name + "self_attention.linear_q_up_proj.layer_norm_weight"] = state_dict[hf_base_name + "self_attn.q_a_layernorm.weight"]
        mg_state_dict[mg_base_name + "self_attention.linear_kv_down_proj.weight"] = state_dict[hf_base_name + "self_attn.kv_a_proj_with_mqa.weight"]
        mg_state_dict[mg_base_name + "self_attention.linear_kv_up_proj.weight"] = state_dict[hf_base_name + "self_attn.kv_b_proj.weight"]
        mg_state_dict[mg_base_name + "self_attention.linear_kv_up_proj.layer_norm_weight"] = state_dict[hf_base_name + "self_attn.kv_a_layernorm.weight"]
        mg_state_dict[mg_base_name + "self_attention.linear_proj.weight"] = state_dict[hf_base_name + "self_attn.o_proj.weight"]
        # NOTE(review): treats layers 0-2 as dense and the rest as MoE; this
        # hardcodes DeepSeek-V3's first_k_dense_replace == 3 — confirm
        # against hf_config.first_k_dense_replace for other variants.
        if layer_id > 2:
            mg_state_dict[mg_base_name + "mlp.router.weight"] = state_dict[hf_base_name + "mlp.gate.weight"]
            #mg_state_dict[mg_base_name + "mlp.router.expert_bias"] = state_dict[hf_base_name + "mlp.gate.e_score_correction_bias"]
            #fc1_weight_list = []
            #fc2_weight_list = []
            for i in range(mg_config["num_experts"]):
                fc1_weight = torch.cat([state_dict[hf_base_name + f"mlp.experts.{i}.gate_proj.weight"], state_dict[hf_base_name + f"mlp.experts.{i}.up_proj.weight"]]).unsqueeze(0)
                fc2_weight = state_dict[hf_base_name + f"mlp.experts.{i}.down_proj.weight"].unsqueeze(0)
                mg_state_dict[mg_base_name+ f"mlp.experts.linear_fc1.weight{i}"] = fc1_weight
                mg_state_dict[mg_base_name+ f"mlp.experts.linear_fc2.weight{i}"] = fc2_weight
                #fc1_weight_list.append(fc1_weight)
                #fc2_weight_list.append(fc2_weight)
            #fc1_weight = torch.cat(fc1_weight_list).permute(0,2,1)
            #fc2_weight = torch.cat(fc2_weight_list).permute(0,2,1)
            #mg_state_dict[mg_base_name + "mlp.experts.weight1"] = fc1_weight
            #mg_state_dict[mg_base_name + "mlp.experts.weight2"] = fc2_weight
            mg_state_dict[mg_base_name + "pre_mlp_layernorm.weight"] = state_dict[hf_base_name + "post_attention_layernorm.weight"]
            share_fc1_weight = torch.cat([state_dict[hf_base_name + "mlp.shared_experts.gate_proj.weight"], state_dict[hf_base_name + "mlp.shared_experts.up_proj.weight"]])
            mg_state_dict[mg_base_name + "mlp.shared_experts.linear_fc1.weight"] = share_fc1_weight
            mg_state_dict[mg_base_name + "mlp.shared_experts.linear_fc2.weight"] = state_dict[hf_base_name + "mlp.shared_experts.down_proj.weight"]
        else:
            # Dense layer: fused gate/up fc1 plus down-proj fc2.
            mg_state_dict[mg_base_name + "mlp.linear_fc1.layer_norm_weight"] = state_dict[hf_base_name + "post_attention_layernorm.weight"]
            mg_state_dict[mg_base_name +"mlp.linear_fc1.weight"] = torch.cat([state_dict[hf_base_name+"mlp.gate_proj.weight"], state_dict[hf_base_name + "mlp.up_proj.weight"]])
            mg_state_dict[mg_base_name + "mlp.linear_fc2.weight"] = state_dict[hf_base_name + "mlp.down_proj.weight"]
    mg_state_dict["embedding.word_embeddings.weight"] = state_dict["model.embed_tokens.weight"]
    mg_state_dict["decoder.final_layernorm.weight"] = state_dict["model.norm.weight"]
    mg_state_dict["output_layer.weight"] = state_dict["lm_head.weight"]
    return mg_state_dict


def get_megatron_model(hf_config, hf_model, hf_model_path=None):
    """Convert an HF flat state dict into a Megatron state dict.

    hf_model_path is only required for deepseek_v3 (the MTP weights are read
    from disk). Raises NotImplementedError for unsupported model types.
    """
    model_type = hf_config.model_type
    if model_type == "deepseek_v3":
        assert hf_model_path is not None, "Need huggingface model path!"
        return hf2megatron_deepseek_model(hf_config, hf_model, hf_model_path)
    if model_type in ("qwen2", "llama"):
        return hf2megatron_base_model(hf_config, hf_model)
    if model_type == "qwen3_moe":
        return hf2megatron_qwen3_moe_model(hf_config, hf_model)
    raise NotImplementedError


@torch.inference_mode()
def clone_state_dict(elem):
    """Recursively clone every tensor inside a nested structure.

    Tensors are cloned (on their current device — note: despite the original
    comment, nothing is moved to CPU here); numpy arrays and strings pass
    through unchanged; mappings and sequences are rebuilt with the same
    container type. Anything else is returned as-is.
    """
    if isinstance(elem, torch.Tensor):
        return elem.clone()
    if isinstance(elem, (np.ndarray, str)):
        return elem
    if isinstance(elem, Mapping):
        return type(elem)({k: clone_state_dict(v) for k, v in elem.items()})
    if isinstance(elem, Sequence):
        return type(elem)([clone_state_dict(item) for item in elem])
    return elem
    
def save_hfmodel(args, model, max_shard_size='10GB'):
    """Save a HF model (module or flat state dict) to ``args.save``, sharded.

    Uses transformers' ``shard_checkpoint`` when available; otherwise falls
    back to huggingface_hub's ``split_torch_state_dict_into_shards``, which
    only supports safetensors output.

    Args:
        args: namespace with ``save`` (output dir) and ``save_safetensors``.
        model: an nn.Module or a flat state dict (name -> tensor).
        max_shard_size: maximum size of each shard file.
    """
    output_state_dict = model if isinstance(model, dict) else model.state_dict()
    save_safetensors = (not USE_TRANSFORMERS_SAVE) or args.save_safetensors
    os.makedirs(args.save, exist_ok=True)

    # NOTE: remove all old index files so a previous save cannot shadow this one.
    for stale_index in (SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME):
        stale_path = os.path.join(args.save, stale_index)
        if os.path.exists(stale_path):
            os.remove(stale_path)

    index = None
    if USE_TRANSFORMERS_SAVE:
        weight_file = SAFE_WEIGHTS_NAME if save_safetensors else WEIGHTS_NAME
        index_file = SAFE_WEIGHTS_INDEX_NAME if save_safetensors else WEIGHTS_INDEX_NAME
        shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size, weights_name=weight_file)
    else:
        if not args.save_safetensors:
            # BUGFIX: `logging` was used here without being imported,
            # raising NameError on this path (import added at file top).
            logging.warning("Since Transformer v4.47.0, the HF ckpt can only be saved with safetensors in the scripts.")
        weight_file = SAFETENSORS_WEIGHTS_FILE_PATTERN
        index_file = SAFETENSORS_INDEX_FILE
        state_dict_split = split_torch_state_dict_into_shards(output_state_dict, max_shard_size=max_shard_size, filename_pattern=weight_file)
        shards = {}
        for filename, tensors in state_dict_split.filename_to_tensors.items():
            shards[filename] = {tensor: output_state_dict[tensor] for tensor in tensors}
        if state_dict_split.is_sharded:
            index = {
                "metadata": state_dict_split.metadata,
                "weight_map": state_dict_split.tensor_to_filename,
            }

    for shard_file, shard in shards.items():
        target_file = os.path.join(args.save, shard_file)
        print(f'huggingface model is saved to {target_file}')
        if save_safetensors:
            save_file(clone_state_dict(shard), target_file, metadata={"format": "pt"})
        else:
            torch.save(clone_state_dict(shard), target_file)

    if index is not None:
        save_index_file = os.path.join(args.save, index_file)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
        print(
            f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
            f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
            f"index located at {save_index_file}."
        )


def generate_rank_group(
    tensor_model_parallel_size,
    expert_tensor_parallel_size,
    expert_model_parallel_size,
    pipeline_model_parallel_size
):
    """Enumerate (tp_rank, etp_rank, ep_rank, pp_rank) tuples over the
    smallest world size that can host the given parallel config.

    Decoder-only models, order "tp-cp-ep-dp-pp", cp fixed to 1. Raises
    ValueError when a (tp_rank, ep_rank) pair would map to more than one
    etp_rank, which the legacy checkpoint layout cannot represent.
    """
    tp = tensor_model_parallel_size
    etp = expert_tensor_parallel_size
    ep = expert_model_parallel_size
    pp = pipeline_model_parallel_size

    # Smallest world size divisible by both the dense (tp) and the
    # expert (etp*ep) tensor groupings, per pipeline stage.
    world_size = pp * math.lcm(tp, etp * ep)
    print(f"The given parallel config should be run on at least {world_size} cards")
    dp = world_size // (pp * tp)
    edp = world_size // (pp * ep * etp)
    # NOTE: If user want to scale up cp_size, he should downscale
    # dp_size or scale up world_size, i.e., edp_size
    cp = 1

    # TODO: support other orders
    order = "tp-cp-ep-dp-pp"
    # Each rank loads a checkpoint keyed by (tp_rank, pp_rank, ep_rank),
    # so every (tp_rank, ep_rank) pair must map to exactly one etp_rank.
    seen_etp = {}
    group_ranks = []
    for rank in range(world_size):
        tp_rank = rank % tp
        etp_rank = rank % etp
        ep_rank = rank // etp % ep
        pp_rank = rank // (dp * tp)

        expected = seen_etp.setdefault((tp_rank, ep_rank), etp_rank)
        if expected != etp_rank:
            raise ValueError("The legacy checkpoint format cannot support this parallel config.")

        group_ranks.append((tp_rank, etp_rank, ep_rank, pp_rank))
    return group_ranks


def add_model_args(parser):
    """Register the checkpoint-conversion CLI options on *parser*.

    Covers target parallel sizes, load/save paths, dtype, conversion
    direction, safetensors toggle, and per-stage layer counts. Returns the
    parser for chaining. Note --save-safetensors uses store_false, so the
    flag *disables* safetensors saving (default True).
    """
    for flag in (
        "--target-tensor-model-parallel-size",
        "--target-pipeline-model-parallel-size",
        "--target-expert-model-parallel-size",
        "--target-expert-tensor-parallel-size",
    ):
        parser.add_argument(flag, type=int, default=1)

    parser.add_argument("--load", type=str)
    parser.add_argument("--config_path", type=str)
    parser.add_argument("--save", type=str)
    parser.add_argument("--dtype", type=str)

    parser.add_argument(
        "--format",
        type=str,
        default="hf2mg",
        help="Convert model format. Support 'hf2mg' or 'mg2hf'."
    )

    parser.add_argument(
        "--save-safetensors",
        action='store_false',
    )

    parser.add_argument(
        "--num-layers-per-stage", nargs='*', type=int, default=None,
        help='set layers number of each stage.'
        'This argument must be in the form: n0, layers0, n1, layers1, ...'
        'When in non-interleaved mode, the sum of n0, n1, ... should be equal'
        'to pipeline-model-parallel-size.'
        'when in interleaved mode, the sum of n0, n1, ... should be equal'
        'to virtual-pipeline-model-parallel-size * pipeline-model-parallel-size,'
        'spread with pipeline-model-parallel-size firstly'
    )

    return parser


def contains(key, str_list):
    """Return True if any string in str_list occurs as a substring of key."""
    return any(needle in key for needle in str_list)

def split_column_parallel(tensor, tp_rank, tp_size):
    """Return the tp_rank-th of tp_size equal chunks of tensor along dim 0."""
    chunk = tensor.shape[0] // tp_size
    start = chunk * tp_rank
    return tensor[start:start + chunk]

def split_row_parallel(tensor, tp_rank, tp_size):
    """Return the tp_rank-th of tp_size equal chunks of tensor along dim 1."""
    chunk = tensor.shape[1] // tp_size
    start = chunk * tp_rank
    return tensor[:, start:start + chunk]

def check_layer(layers_to_copy, k):
    """Return True if parameter name *k* belongs to one of the decoder
    layers selected in *layers_to_copy* (keys like 'decoder.layers.<n>').

    BUGFIX: the dots in the pattern are now escaped, so unrelated names
    such as 'decoderXlayersX3' can no longer match.
    """
    match = re.search(r"decoder\.layers\.\d+", k)
    # Always return a plain bool (the old version could return [] for "no").
    return match is not None and match.group(0) in layers_to_copy


def save_state_dict(args, model_chunks, checkpoint_name, has_vpp: bool=False, save_args: bool=True):
    """Write model chunks into a single Megatron checkpoint file.

    Without virtual pipeline parallelism the single chunk is stored under
    'model'; with vpp, chunk i is stored under 'model{i}'. The state dict is
    deep-cloned before torch.save so no view of the source tensors leaks
    into the file.
    """
    payload = {}
    if save_args:
        payload['args'] = args
    payload['checkpoint_version'] = 3.0
    payload['iteration'] = 0
    if has_vpp:
        for chunk_id, chunk in enumerate(model_chunks):
            payload[f"model{chunk_id}"] = chunk
    else:
        payload['model'] = model_chunks[0]
    os.makedirs(os.path.dirname(checkpoint_name), exist_ok=True)
    torch.save(clone_state_dict(payload), checkpoint_name)
    del payload
    gc.collect()


def save_mgmodel(mgmodel, args, mg_config):
    # tp, etp, ep, pp
    args.tensor_model_parallel_size = args.target_tensor_model_parallel_size
    args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size

    if "num_experts" in mg_config and mg_config["num_experts"] is not None:
        args.expert_model_parallel_size = args.target_expert_model_parallel_size
        args.expert_tensor_parallel_size = args.target_expert_tensor_parallel_size

    os.makedirs(args.save, exist_ok=True)
    os.system("cp -rf " + args.load + "/*config.json " + args.save)
    os.system("cp -rf " + args.load + "/tokenizer* " + args.save)
    os.system("cp -rf " + args.load + "/*tok* " + args.save)

    tracker_filepath = os.path.join(args.save, 'latest_checkpointed_iteration.txt')
    with open(tracker_filepath, "w") as f:
        f.write("release")
    if args.dtype == "bf16":
        dtype = torch.bfloat16
    elif args.dtype == "fp16":
        dtype = torch.float16
    else:
        dtype = torch.float32

    full_model = mgmodel
    '''
    for k in list(full_model.keys()):
        if 'extra_state' in k:
            # NOTE: since TE 1.14, fp8 metadata will be saved as tensor. 
            # Always drop these values in the MG ckpt to avoid potential issue.
            # This should work fine because fp8 metadata is not supported by HF ckpt.
            full_model[k] = None
        elif full_model[k] is None:
            full_model.pop(k)
    '''

    if "num_experts" in mg_config and mg_config["num_experts"] is not None:
        pattern = r'weight(\d+)'
        assert mg_config["num_experts"] % args.expert_model_parallel_size == 0
        num_local_experts = mg_config["num_experts"] // args.expert_model_parallel_size if mg_config["num_experts"] else 0

    stage_split = args.num_layers_per_stage[::2]
    num_layers_per_stage_split = args.num_layers_per_stage[1::2]
    pp_layers_per_stage = []
    for i in range(len(stage_split)):
        for j in range(stage_split[i]):
            pp_layers_per_stage.append(num_layers_per_stage_split[i])

    tp_size = args.target_tensor_model_parallel_size
    etp_size = args.target_expert_tensor_parallel_size
    group_per_split = mg_config["num_query_groups"] // args.target_tensor_model_parallel_size
    num_query_groups = mg_config["num_query_groups"]
    head_dim = mg_config["head_dim"]
    hidden_size = mg_config["hidden_size"]

    for (tp_rank, etp_rank, ep_rank, pp_rank) in generate_rank_group(
        args.target_tensor_model_parallel_size,
        args.target_expert_tensor_parallel_size,
        args.target_expert_model_parallel_size,
        args.target_pipeline_model_parallel_size
    ):
        #out_state_dict = {"model": {}}
        #model_split = out_state_dict["model"]
        model_split = {}
        layer_offset = sum(pp_layers_per_stage[:pp_rank])
        layers_to_copy = {}
        for layer in range(pp_layers_per_stage[pp_rank]):
            pp_layer_id = layer + layer_offset
            layers_to_copy[f"decoder.layers.{pp_layer_id}"] = layer
        checkpoint_name = get_checkpoint_name(
            args.save, 0, True, 
            args.target_pipeline_model_parallel_size > 1, 
            tp_rank, 
            pp_rank, 
            args.target_expert_model_parallel_size > 1, 
            ep_rank
        )
        print(f'save model to {checkpoint_name}')
        os.makedirs(os.path.dirname(checkpoint_name), exist_ok=True)
        has_mtp = (pp_rank == args.target_pipeline_model_parallel_size - 1)
        for k, v in full_model.items():
            # NOTE: If k not in current pp_rank, skipping
            if check_layer(layers_to_copy, k) and ('mtp' not in k):
                layer_pattern = re.compile(r'\d+')
                res = layer_pattern.findall(k)
                k = re.sub(r"decoder.layers.\d+", "decoder.layers." + str(layers_to_copy["decoder.layers." + res[0]]), k)
            elif 'mtp' in k and not has_mtp:
                print(f"**** not has mtp, k: {k}")
                continue
                #if not has_mtp:
                #    continue
            elif not contains(k, ["word_embeddings", "output_layer", "final_layernorm"]):
                continue

            if not isinstance(v, torch.Tensor):
                target_v = v
            elif contains(k, ['linear_q_down_proj', 'linear_kv_down_proj']) and 'norm' not in k:
                target_v = v
            #elif contains(k, ['linear_q_down_proj', 'linear_kv_down_proj', 'linear_q_up_proj', 'linear_kv_up_proj', 'linear_q_proj']) and 'norm' not in k:
            elif contains(k, ['linear_q_up_proj', 'linear_kv_up_proj', 'linear_q_proj']) and 'norm' not in k:
                target_v = split_column_parallel(v, tp_rank, tp_size)                
            elif 'linear_qkv.weight' in k:
                viewed = v.view(num_query_groups, -1, head_dim, hidden_size)
                viewed = viewed[group_per_split * tp_rank: group_per_split * (tp_rank + 1)]
                target_v = viewed.view(-1, hidden_size)
            elif 'linear_qkv.bias' in k:
                viewed = v.view(num_query_groups, -1, head_dim)
                viewed = viewed[group_per_split * tp_rank: group_per_split * (tp_rank + 1)]
                target_v = viewed.view(-1)                
            elif 'linear_proj' in k:
                target_v = split_row_parallel(v, tp_rank, tp_size)
            elif 'mlp.linear_fc2' in k: # down proj in Dense Layer
                target_v = split_row_parallel(v, tp_rank, tp_size)
            elif 'mlp.linear_fc1' in k and 'norm' not in k: # gate_up proj in Dense Layer
                # Split Gated Column Linear
                seg = mg_config["ffn_hidden_size"] // args.tensor_model_parallel_size
                viewed = v.view(-1, mg_config["ffn_hidden_size"], mg_config["hidden_size"])
                target_v = viewed[:, seg * tp_rank: seg * (tp_rank + 1), :].reshape(-1, mg_config["hidden_size"])
            elif 'experts' in k and 'shared_experts' not in k:
                # NOTE: If k not in current ep_rank, skipping
                expert_rank = int(re.findall(pattern, k)[0])
                if expert_rank // num_local_experts != ep_rank:
                    continue
                expert_local_rank = expert_rank % num_local_experts
                if expert_local_rank == 0:
                    if "linear_fc1" in k:
                        fc1_weight_list = [v]
                    elif "linear_fc2" in k:
                        fc2_weight_list = [v]
                    continue
                elif expert_local_rank == (num_local_experts - 1):
                    if "linear_fc1" in k:
                        fc1_weight_list.append(v)
                        k = k.replace(f"mlp.experts.linear_fc1.weight{expert_rank}", "mlp.experts.weight1")
                        fc1_weight = torch.cat(fc1_weight_list).permute(0,2,1)
                        seg = mg_config["moe_ffn_hidden_size"] // etp_size
                        target_v = fc1_weight[:, :,  seg * etp_rank: seg * (etp_rank + 1)]
                    elif "linear_fc2" in k:
                        fc2_weight_list.append(v)
                        fc2_weight = torch.cat(fc2_weight_list).permute(0,2,1)
                        k = k.replace(f"mlp.experts.linear_fc2.weight{expert_rank}", "mlp.experts.weight2")
                        target_v = split_row_parallel(fc2_weight, etp_rank, etp_size)
                else:
                    if "linear_fc1" in k:
                        fc1_weight_list.append(v)
                    elif "linear_fc2" in k:
                        fc2_weight_list.append(v)
                    continue
                #print(f"********* key name: {k} *******")

                #if args.moe_grouped_gemm == True:
                #    k = k.replace(f'weight{expert_rank}', f'weight{expert_local_rank}')
                #else:
                #    k = k.replace(f'local_experts.{expert_rank}', f'local_experts.{expert_local_rank}')
                #if 'weight1' in k:
                #    seg = 2 * mg_config["moe_ffn_hidden_size"] // etp_size
                #    target_v = v[:, :,  seg * etp_rank: seg * (etp_rank + 1)]
                #elif 'weight2' in k:
                #    target_v = split_row_parallel(v, etp_rank, etp_size)
                #else:
                #    raise NotImplementedError()
            elif 'shared_experts' in k and 'gate' not in k:
                # SharedExperts is from MLP, split by tp_rank
                if 'linear_fc1' in k:
                    viewed = v.view(-1, mg_config["moe_shared_expert_intermediate_size"], mg_config["hidden_size"])
                    seg = mg_config["moe_shared_expert_intermediate_size"] // tp_size
                    target_v = viewed[:, seg * tp_rank: seg * (tp_rank + 1), :].reshape(-1, mg_config["hidden_size"])
                elif 'linear_fc2' in k:
                    target_v = split_row_parallel(v, tp_rank, tp_size)
                else:
                    raise NotImplementedError()
            elif "word_embeddings" in k or "output_layer" in k:
                target_v = split_column_parallel(v, tp_rank, tp_size)
            elif 'eh_proj' in k:
                target_v = split_column_parallel(v, tp_rank, tp_size)
            else:
                target_v = v

            if "embedding.word_embeddings" in k:
                if pp_rank == 0 or ("num_mtp_predictor" in mg_config and has_mtp):
                    model_split[k] = target_v.to(dtype)
            elif "output_layer" in k or "final_layernorm" in k:
                if pp_rank == (args.target_pipeline_model_parallel_size - 1):
                    model_split[k] = target_v.to(dtype)
            else:
                model_split[k] = target_v.to(dtype)
        #print(f"******* checkpoint_name: {checkpoint_name}, has_mtp:{has_mtp}, layer_name: {model_split.keys()}")
        save_state_dict(args, [model_split], checkpoint_name)
    print(f'megatron model is save to {args.save}')


def merge_transformers_sharded_states(path, num_checkpoints):
    """Merge legacy transformers ``pytorch_model-XXXXX-of-XXXXX.bin`` shards.

    Args:
        path (str): directory containing the sharded checkpoint files.
        num_checkpoints (int): total number of shards to merge.

    Returns:
        dict: single merged ``{param_name: tensor}`` state dict on CPU.
    """
    merged = {}
    for shard_idx in range(1, num_checkpoints + 1):
        shard_file = os.path.join(
            path, f"pytorch_model-{shard_idx:05d}-of-{num_checkpoints:05d}.bin"
        )
        merged.update(torch.load(shard_file, map_location="cpu"))
    return merged
    

def load_hf_model(args):
    """Load a HuggingFace model state dict from ``args.load``.

    Supports three on-disk layouts:
      * a single ``pytorch_model.bin``;
      * sharded ``pytorch_model-XXXXX-of-XXXXX.bin`` files — the directory also
        contains ``pytorch_model.bin.index.json``, which matches the
        ``pytorch_model`` prefix, hence the ``- 1`` on the shard count;
      * ``*.safetensors`` shards.

    Args:
        args: namespace with a ``load`` attribute (checkpoint directory).

    Returns:
        dict: merged ``{param_name: tensor}`` state dict on CPU.
    """
    sub_dirs = [x for x in os.listdir(args.load) if x.startswith("pytorch_model")]
    if len(sub_dirs) == 1:
        checkpoint_name = "pytorch_model.bin"
        # BUGFIX: was ``args.load_path`` — that attribute does not exist;
        # every other branch (and the rest of this file) uses ``args.load``.
        state_dict = torch.load(os.path.join(args.load, checkpoint_name), map_location="cpu")
    elif len(sub_dirs) > 1:
        # One of the prefix matches is the index json, not a weight shard.
        num_checkpoints = len(sub_dirs) - 1
        state_dict = merge_transformers_sharded_states(args.load, num_checkpoints)
    else:
        # load the transformers model state dict and config [support safetensors format]
        sub_dirs = sorted([x for x in os.listdir(args.load) if x.endswith(".safetensors")])
        state_dict = {}
        for checkpoint_name in sub_dirs:
            current_chunk = load_file(os.path.join(args.load, checkpoint_name), 'cpu')
            state_dict.update(current_chunk)
    return state_dict

def load_mg_model(args):
    """Load a TP/PP/EP-sharded Megatron checkpoint and merge it into one state dict.

    Visits every (tp, etp, ep, pp) rank checkpoint under ``args.load``, maps
    per-stage local layer indices back to global layer indices, collects the
    per-rank tensor shards, then concatenates them back into full tensors
    (undoing column/row/grouped-QKV/expert tensor-parallel splits).

    Args:
        args: CLI namespace; reads ``load``, ``save``, ``config_path``,
            ``target_*_parallel_size`` and ``num_layers_per_stage``.

    Returns:
        dict: merged (unsharded) Megatron-format ``{key: tensor}`` state dict.
    """
    os.makedirs(args.save, exist_ok=True)
    # os.system("cp -rf " + args.config_path + "/*config.json " + args.save)
    # os.system("cp -rf " + args.config_path + "/tokenizer* " + args.save)
    # os.system("cp -rf " + args.config_path + "/*.py " + args.save)
    # Best-effort copies of config/tokenizer assets into both save and load
    # dirs; any failure is deliberately swallowed ("|| true").
    os.system("cp -rf " + args.config_path + "/*config.json " + args.save + " >/dev/null 2>&1 || true")
    os.system("cp -rf " + args.config_path + "/tokenizer* " + args.save + " >/dev/null 2>&1 || true")
    os.system("cp -rf " + args.config_path + "/*.py " + args.save + " >/dev/null 2>&1 || true")
    
    os.system("cp -rf " + args.config_path + "/*config.json " + args.load + " >/dev/null 2>&1 || true")
    os.system("cp -rf " + args.config_path + "/tokenizer* " + args.load + " >/dev/null 2>&1 || true")
    os.system("cp -rf " + args.config_path + "/model.safetensors.index.json " + args.load + " >/dev/null 2>&1 || true")
    
    hf_config = AutoConfig.from_pretrained(args.load, trust_remote_code=True)
    print(f"********** hf_config:{hf_config}")
    mg_config = get_megatron_config(hf_config)

    # Mirror the "target_*" CLI sizes onto the canonical names used below.
    args.tensor_model_parallel_size = args.target_tensor_model_parallel_size
    args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size
    args.expert_model_parallel_size = args.target_expert_model_parallel_size
    args.expert_tensor_parallel_size = args.target_expert_tensor_parallel_size
    if args.tensor_model_parallel_size > 1:
        args.sequence_parallel = True

    model_path = args.load
    tracker_filename = get_checkpoint_tracker_filename(model_path)
    iteration, release = read_metadata(tracker_filename)
    #group_per_split = hf_config.num_attention_heads // args.tensor_model_parallel_size
    # print(f"********** mg_config:{mg_config}")
    
    # Number of KV groups stored on each TP rank.
    group_per_split = mg_config['num_query_groups'] // args.tensor_model_parallel_size
    # print(f"********** group_per_split = {group_per_split} ***********")
    if "num_experts" in mg_config and mg_config['num_experts'] > 1:
        # NOTE(review): ep_pattern / num_local_experts are only bound on the MoE
        # path; an 'experts' key in a non-MoE checkpoint would NameError below.
        ep_pattern = r'weight(\d+)'
        num_local_experts = mg_config['num_experts'] // args.expert_model_parallel_size
        
    state_dict = {}
    # key -> list of per-rank shards, indexed by tp_rank (or etp_rank for experts).
    mid_state = defaultdict(list)
    pp_layers_per_stage = []
    # num_layers_per_stage alternates (stage_count, layers_per_stage) pairs;
    # expand into one entry per pipeline stage.
    stage_split = args.num_layers_per_stage[::2]
    num_layers_per_stage_split = args.num_layers_per_stage[1::2]
    pp_layers_per_stage = []
    for i in range(len(stage_split)):
        for j in range(stage_split[i]):
            pp_layers_per_stage.append(num_layers_per_stage_split[i])

    layers_to_copy = {}
    for (tp_rank, etp_rank, ep_rank, pp_rank) in generate_rank_group(
            args.tensor_model_parallel_size,
            args.expert_tensor_parallel_size,
            args.expert_model_parallel_size,
            args.pipeline_model_parallel_size
        ):
            # Map (pp_rank, stage-local layer) -> global layer index.
            layer_offset = sum(pp_layers_per_stage[:pp_rank])
            for layer in range(pp_layers_per_stage[pp_rank]):
                pp_layer_id = layer + layer_offset
                layers_to_copy[(pp_rank, layer)] = pp_layer_id

            checkpoint_name = get_checkpoint_name(
                model_path, 
                iteration, 
                release, 
                args.pipeline_model_parallel_size > 1, 
                tp_rank, 
                pp_rank, 
                args.expert_model_parallel_size > 1,
                ep_rank
            )

            print(f'load {checkpoint_name}')
            split_state = torch.load(checkpoint_name, map_location="cpu")['model']
            for k, v in split_state.items():
                if '_extra_state' in k:
                    continue
                try:
                    # Rewrite the stage-local layer index in the key to the global one.
                    pattern = re.compile(r'\d+')
                    res = pattern.findall(k)
                    tgt_layer_id = layers_to_copy[(pp_rank, int(res[0]))]
                    tgt = re.sub(r"decoder.layers.\d+", "decoder.layers." + str(tgt_layer_id), k)

                    if 'experts' in k and 'shared_experts' not in k:
                        # Grouped-GEMM expert weights: fan the stacked tensor out
                        # into one entry per global expert rank, sharded by ETP.
                        fc_id = int(re.findall(ep_pattern, k)[0])
                        for local_expert_rank in range(num_local_experts):
                            expert_rank = local_expert_rank + num_local_experts * ep_rank
                            local_tgt = tgt.replace(f'weight{fc_id}', f'linear_fc{fc_id}.weight{expert_rank}')
                            if local_tgt not in mid_state:
                                mid_state[local_tgt] = [None] * args.target_expert_tensor_parallel_size
                            # NOTE: deduplicate MoE params by individual ETP
                            if mid_state[local_tgt][etp_rank] is not None:
                                # NOTE: Here we can add a check to ensure parameters saved by mcore are synchronized.
                                pass
                            mid_state[local_tgt][etp_rank] = v[local_expert_rank]
                    else:
                        if tgt not in mid_state:
                            mid_state[tgt] = [None] * args.target_tensor_model_parallel_size
                        if mid_state[tgt][tp_rank] is not None:
                            # NOTE: Here we can add a check to ensure parameters saved by mcore are synchronized.
                            pass
                        mid_state[tgt][tp_rank] = v
                except:
                    # NOTE(review): keys with no layer number (embeddings, output
                    # head, final norm) reach here via the index/key lookup above
                    # failing — this bare except is (fragile) control flow, not
                    # error suppression; narrowing it would need care.
                    if contains(k, ["word_embeddings", "output_layer"]):
                        if k not in mid_state:
                            mid_state[k] = [None] * args.target_tensor_model_parallel_size
                        mid_state[k][tp_rank] = v
                    elif "final_layernorm" in k:
                        mid_state[k] = [v]
                    else:
                        raise ValueError(f"{k} is missing! ")
    # Second pass: concatenate per-rank shards back into full tensors.
    for k, v in mid_state.items():
        try:
            if 'extra_state' in k:
                continue
            if not isinstance(v[0], torch.Tensor):
                target_v = v[0]
            elif contains(k, ['router', 'gate', 'input_layernorm', 'pre_mlp_layernorm', 'enorm', 'hnorm', 'layer_norm_weight', 'final_layernorm']):
                # Replicated (non-sharded) params: every rank holds the same copy.
                target_v = v[0]
            elif contains(k, ['word_embeddings', 'output_layer', 'linear_q_down_proj', 'linear_q_up_proj', 'linear_kv_down_proj', 'linear_kv_up_proj.layer_norm_weight', 'eh_proj', 'linear_q_proj']):
                # Column-parallel: concatenate along the output dimension.
                target_v = torch.cat(v, dim=0)
            elif 'linear_kv_up_proj' in k:
                viewed = [x.view(group_per_split, -1, mg_config['qk_head_dim'] + mg_config['v_head_dim'], mg_config['kv_lora_rank']) for x in v]
                target_v = torch.cat(viewed, dim=0).view(-1, mg_config['kv_lora_rank'])
            elif 'linear_qkv.weight' in k:
                #for x in v:
                #    print(f"************************x.shape = {x.shape}, expected shape = ({group_per_split}, -1, {mg_config['head_dim']}, {mg_config['hidden_size']})")
                #    try:
                #        viewed = x.view(group_per_split, -1, mg_config['head_dim'], mg_config['hidden_size'])
                #    except RuntimeError as e:
                #        print(f"Error reshaping tensor {x.shape}: {e}")
                #        raise
                # Re-interleave the grouped QKV shards by KV group before flattening.
                viewed = [x.view(group_per_split, -1, mg_config['head_dim'], mg_config['hidden_size']) for x in v]
                target_v = torch.cat(viewed, dim=0).view(-1, mg_config['hidden_size'])
            elif 'linear_qkv.bias' in k:
                viewed = [x.view(group_per_split, -1) for x in v]
                target_v = torch.cat(viewed, dim=0).view(-1)
            elif 'linear_proj' in k:
                # Row-parallel: concatenate along the input dimension.
                target_v = torch.cat(v, dim=1)
            elif 'linear_fc1' in k:
                # Gated column-parallel (gate/up stacked): merge per-rank halves.
                viewed = [x.view(2, -1, mg_config['hidden_size']) for x in v]
                target_v = torch.cat(viewed, dim=1).view(-1, mg_config['hidden_size'])
            elif 'linear_fc2' in k:
                target_v = torch.cat(v, dim=1)
            else:
                raise ValueError(f"{k} is missing!")
        except Exception as e:
            print(f"Failed on {k} with shape {[item.shape for item in v]}")
            raise e
        state_dict[k] = target_v

    #missing, unexpected =  model.load_state_dict(state_dict, strict=False)
    return state_dict 

def mg2hf_model(mgmodel, mg_config, args):
    """Convert a merged Megatron-Core state dict into a HuggingFace state dict.

    Args:
        mgmodel (dict): full (unsharded) Megatron state dict, ``key -> tensor``.
        mg_config (dict): Megatron config derived from the HF config.
        args: CLI namespace; only ``args.num_experts`` is read (MTP branch).

    Returns:
        dict: HuggingFace-style state dict (``model.layers.N. ...`` keys, plus
        ``model.embed_tokens`` / ``model.norm`` / ``lm_head``).
    """
    num_query_groups = mg_config['num_query_groups']
    hidden_size = mg_config['hidden_size']
    head_dim = mg_config['head_dim']
    value_num_per_group = mg_config['num_attention_heads'] // num_query_groups
    q_dim_per_group = hidden_size // num_query_groups
    kv_dim_per_group = head_dim
    mtp_dict = {}
    if 'num_mtp_predictor' in mg_config and mg_config['num_mtp_predictor'] > 0:
        # The MTP predictor is exported as one extra HF layer appended right
        # after the regular decoder layers.
        mtp_layer_idx = mg_config['num_layers']
        mtp_dict[f"model.layers.{mtp_layer_idx}.embed_tokens.weight"] = mgmodel['embedding.word_embeddings.weight']
        # e/h norm and eh_proj
        mtp_dict[f'model.layers.{mtp_layer_idx}.enorm.weight'] = mgmodel['mtp.layers.0.enorm.weight']
        mtp_dict[f'model.layers.{mtp_layer_idx}.hnorm.weight'] = mgmodel['mtp.layers.0.hnorm.weight']
        mtp_dict[f'model.layers.{mtp_layer_idx}.eh_proj.weight'] = mgmodel['mtp.layers.0.eh_proj.weight']

        # attention and mlp
        mtplayer = "mtp_predictor.mtp_modules.0.decoder.layers.0."
        mtp_dict[f"model.layers.{mtp_layer_idx}.input_layernorm.weight"] = mgmodel[mtplayer + "input_layernorm.weight"]
        mtp_dict[f"model.layers.{mtp_layer_idx}.post_attention_layernorm.weight"] = mgmodel[mtplayer + 'pre_mlp_layernorm.weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_a_proj.weight"] = mgmodel[mtplayer + 'self_attention.linear_q_down_proj.weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_b_proj.weight"] = mgmodel[mtplayer + 'self_attention.linear_q_up_proj.weight']
        # BUGFIX: the prefix variable was accidentally quoted inside the key
        # string ('mtplayer+self_attention...'), a key that can never exist.
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.q_a_layernorm.weight"] = mgmodel[mtplayer + 'self_attention.linear_q_up_proj.layer_norm_weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_a_proj_with_mqa.weight"] = mgmodel[mtplayer + 'self_attention.linear_kv_down_proj.weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_b_proj.weight"] = mgmodel[mtplayer + 'self_attention.linear_kv_up_proj.weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.kv_a_layernorm.weight"] = mgmodel[mtplayer + 'self_attention.linear_kv_up_proj.layer_norm_weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.self_attn.o_proj.weight"] = mgmodel[mtplayer + 'self_attention.linear_proj.weight']
        mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.gate.weight"] = mgmodel[mtplayer + 'mlp.router.weight']
        # NOTE: the e_score_correction_bias in mcore model will be initialized with bfloat16 and \
        # recover to fp32 in the first forward. Convert to fp32 to suit huggingface impl.
        mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.gate.e_score_correction_bias"] = mgmodel[mtplayer + 'mlp.router.expert_bias'].float()

        for i in range(args.num_experts):
            # BUGFIX: expert weights live as plain dict entries
            # 'mlp.experts.linear_fc1.weight{i}' (same keying as the per-layer
            # loop below) — the previous getattr() on a tensor could not work.
            (
                mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".gate_proj.weight"],
                mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".up_proj.weight"]
            ) = torch.chunk(mgmodel[mtplayer + f'mlp.experts.linear_fc1.weight{i}'], 2)
            mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.experts." + str(i) + ".down_proj.weight"] = mgmodel[mtplayer + f'mlp.experts.linear_fc2.weight{i}']

        (
            mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.gate_proj.weight"],
            mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.up_proj.weight"]
        ) = torch.chunk(mgmodel[mtplayer + 'mlp.shared_experts.linear_fc1.weight'], 2)
        mtp_dict[f"model.layers.{mtp_layer_idx}.mlp.shared_experts.down_proj.weight"] = mgmodel[mtplayer + 'mlp.shared_experts.linear_fc2.weight']
        # output norm
        mtp_dict[f"model.layers.{mtp_layer_idx}.shared_head.norm.weight"] = mgmodel['mtp.layers.0.final_layernorm.weight']
        # shared output head
        mtp_dict[f"model.layers.{mtp_layer_idx}.shared_head.head.weight"] = mgmodel['output_layer.weight']
    hf_model_dict = {}

    hf_model_dict['model.embed_tokens.weight'] = mgmodel['embedding.word_embeddings.weight']
    for layer_idx in range(mg_config['num_layers']):
        mglayer = f'decoder.layers.{layer_idx}.'
        hflayer = f'model.layers.{layer_idx}.'
        try:
            hf_model_dict[hflayer+'input_layernorm.weight'] = mgmodel[mglayer+'input_layernorm.weight']
        except KeyError:
            # Fused-norm layout stores the input norm on the qkv projection.
            hf_model_dict[hflayer+'input_layernorm.weight'] = mgmodel[mglayer+'self_attention.linear_qkv.layer_norm_weight']

        if 'q_lora_rank' in mg_config and mg_config['model_type'] == 'deepseek_v3':
            # MLA attention (DeepSeek-V3): low-rank q/kv projections.
            if mg_config['q_lora_rank'] is None:
                hf_model_dict[hflayer+'self_attn.q_proj.weight'] = mgmodel[mglayer+'self_attention.linear_q_proj.weight']
            else:
                hf_model_dict[hflayer+'self_attn.q_a_proj.weight'] = mgmodel[mglayer+'self_attention.linear_q_down_proj.weight']
                hf_model_dict[hflayer+'self_attn.q_b_proj.weight'] = mgmodel[mglayer+'self_attention.linear_q_up_proj.weight']
                hf_model_dict[hflayer+'self_attn.q_a_layernorm.weight'] = mgmodel[mglayer+'self_attention.linear_q_up_proj.layer_norm_weight']

            hf_model_dict[hflayer+'self_attn.kv_a_proj_with_mqa.weight'] = mgmodel[mglayer+'self_attention.linear_kv_down_proj.weight']
            hf_model_dict[hflayer+'self_attn.kv_b_proj.weight'] = mgmodel[mglayer+'self_attention.linear_kv_up_proj.weight']
            hf_model_dict[hflayer+'self_attn.kv_a_layernorm.weight'] = mgmodel[mglayer+'self_attention.linear_kv_up_proj.layer_norm_weight']
            hf_model_dict[hflayer+'self_attn.o_proj.weight'] = mgmodel[mglayer+'self_attention.linear_proj.weight']
        else:
            # GQA attention: un-interleave the fused qkv into q/k/v projections.
            qkv_weight = mgmodel[mglayer +'self_attention.linear_qkv.weight'].view(num_query_groups, -1, head_dim, hidden_size)
            q_weight, k_weight, v_weight = torch.split(qkv_weight, split_size_or_sections=[value_num_per_group, 1, 1], dim=1)
            hf_model_dict[hflayer + 'self_attn.q_proj.weight'] = q_weight.reshape(-1, hidden_size)
            hf_model_dict[hflayer + 'self_attn.k_proj.weight'] = k_weight.reshape(-1, hidden_size)
            hf_model_dict[hflayer + 'self_attn.v_proj.weight'] = v_weight.reshape(-1, hidden_size)
            # BUGFIX: hf2megatron_base_config always sets 'add_qkv_bias' (True or
            # False), so a presence test was always taken and crashed on models
            # without a qkv bias. Test the value instead.
            if mg_config.get('add_qkv_bias', False):
                qkv_bias = mgmodel[mglayer +'self_attention.linear_qkv.bias'].view(num_query_groups, -1)
                q_bias, k_bias, v_bias = torch.split(qkv_bias, split_size_or_sections=[q_dim_per_group, kv_dim_per_group, kv_dim_per_group], dim=1)
                hf_model_dict[hflayer + 'self_attn.q_proj.bias'] = q_bias.contiguous().view(-1)
                hf_model_dict[hflayer + 'self_attn.k_proj.bias'] = k_bias.contiguous().view(-1)
                hf_model_dict[hflayer + 'self_attn.v_proj.bias'] = v_bias.contiguous().view(-1)
            hf_model_dict[hflayer+'self_attn.o_proj.weight'] = mgmodel[mglayer+'self_attention.linear_proj.weight']

        if (mg_config['model_type'] == 'deepseek_v3' and layer_idx < 3) or ('moe' not in mg_config['model_type']):
            # Dense MLP layer: split the fused gate_up projection.
            hf_model_dict[hflayer+'post_attention_layernorm.weight'] = mgmodel[mglayer+'mlp.linear_fc1.layer_norm_weight']
            gate_weight, up_weight = torch.split(mgmodel[mglayer+'mlp.linear_fc1.weight'], split_size_or_sections=mg_config['ffn_hidden_size'])
            hf_model_dict[hflayer+'mlp.gate_proj.weight'] = gate_weight
            hf_model_dict[hflayer+'mlp.up_proj.weight'] = up_weight
            hf_model_dict[hflayer+'mlp.down_proj.weight'] = mgmodel[mglayer+'mlp.linear_fc2.weight']
        elif 'num_experts' in mg_config and mg_config['num_experts'] > 1:
            hf_model_dict[hflayer+'mlp.gate.weight'] = mgmodel[mglayer+'mlp.router.weight']
            # BUGFIX: the original read hf_model_dict[...] before that key was
            # ever assigned (guaranteed KeyError). Mirror the MTP branch above:
            # cast the expert bias to fp32 to suit the huggingface impl.
            hf_model_dict[hflayer+'mlp.gate.e_score_correction_bias'] = mgmodel[mglayer+'mlp.router.expert_bias'].float()
            for i in range(mg_config['num_experts']):
                hfexpert = hflayer + f'mlp.experts.{i}.'
                linear_fc1_weighti = mgmodel[mglayer+f'mlp.experts.linear_fc1.weight{i}']
                gate_weight, up_weight = torch.split(linear_fc1_weighti,
                                                        split_size_or_sections=mg_config['moe_ffn_hidden_size'])
                hf_model_dict[hfexpert+'gate_proj.weight'] = gate_weight
                hf_model_dict[hfexpert+'up_proj.weight'] = up_weight
                # BUGFIX: the key was missing the f-prefix, so the literal text
                # '{i}' was looked up instead of the expert index.
                linear_fc2_weighti = mgmodel[mglayer+f'mlp.experts.linear_fc2.weight{i}']
                hf_model_dict[hfexpert +'down_proj.weight'] = linear_fc2_weighti
            if 'moe_shared_expert_intermediate_size' in mg_config:
                shared_expert_gate_weight, shared_expert_up_weight = \
                    torch.split(mgmodel[mglayer+'mlp.shared_experts.linear_fc1.weight'],
                                split_size_or_sections=mg_config['moe_shared_expert_intermediate_size'])
                hf_model_dict[hflayer+'mlp.shared_experts.gate_proj.weight'] = shared_expert_gate_weight
                hf_model_dict[hflayer+'mlp.shared_experts.up_proj.weight'] = shared_expert_up_weight
                hf_model_dict[hflayer+'mlp.shared_experts.down_proj.weight'] = mgmodel[mglayer+'mlp.shared_experts.linear_fc2.weight']
            hf_model_dict[hflayer+'post_attention_layernorm.weight'] = mgmodel[mglayer+'pre_mlp_layernorm.weight']

    hf_model_dict['model.norm.weight'] = mgmodel['decoder.final_layernorm.weight']
    hf_model_dict['lm_head.weight'] = mgmodel['output_layer.weight']

    hf_model_dict.update(mtp_dict)
    return hf_model_dict

    

def add_extra_args(parser):
    """Attach the model-conversion CLI options to *parser* and return it."""
    return add_model_args(parser)

def main():
    """CLI entry point: convert HF -> Megatron or Megatron -> HF checkpoints."""
    parser = argparse.ArgumentParser()
    parser = add_extra_args(parser)
    args = parser.parse_args()

    # The HF config may live next to the checkpoint or in a separate config dir.
    cfg_dir = getattr(args, "config_path", None) or args.load
    hf_config = AutoConfig.from_pretrained(cfg_dir, trust_remote_code=True)
    mg_config = get_megatron_config(hf_config)

    # Pre-run sanity check (avoids accidentally starting with a wrong
    # 4096/32 Llama-style config).
    print("==== Sanity Check ====")
    print("model_type:", hf_config.model_type)
    print("hidden_size:", hf_config.hidden_size)
    print("num_heads:", hf_config.num_attention_heads)
    print("num_kv_heads:", getattr(hf_config, "num_key_value_heads",
                                hf_config.num_attention_heads))

    if args.format == "hf2mg":
        hf_state = load_hf_model(args)
        mg_model = get_megatron_model(hf_config, hf_state, args.load)
        del hf_state
        gc.collect()
        save_mgmodel(mg_model, args, mg_config)
    else:
        mg_state = load_mg_model(args)
        hf_state = mg2hf_model(mg_state, mg_config, args)
        del mg_state
        gc.collect()
        save_hfmodel(args, hf_state)

# Script entry point: run the checkpoint conversion driven by CLI arguments.
if __name__ == "__main__":
    main()
