#!/usr/bin/env bash
# Convert a Hugging Face Qwen3-14B-Base checkpoint to Megatron-Core (mcore)
# weights via MindSpeed-LLM's convert_ckpt.py, sharding to TP=8 / PP=1.
#
# All knobs below are overridable from the environment, e.g.:
#   LOAD_DIR=./model_from_hf/other-model/ SAVE_DIR=./out TP_SIZE=4 bash convert.sh
set -euo pipefail

# Defaults reproduce the original hard-coded invocation exactly.
LOAD_DIR="${LOAD_DIR:-./model_from_hf/Qwen3-14B-Base/}"
SAVE_DIR="${SAVE_DIR:-./model_weights/Qwen3-14B-mcore}"
TP_SIZE="${TP_SIZE:-8}"
PP_SIZE="${PP_SIZE:-1}"

# Converter must exist next to this script; fail early with a clear message.
[[ -f convert_ckpt.py ]] || { echo "error: convert_ckpt.py not found in $(pwd)" >&2; exit 1; }
[[ -d "${LOAD_DIR}" ]] || { echo "error: load dir not found: ${LOAD_DIR}" >&2; exit 1; }

python convert_ckpt.py \
       --use-mcore-models \
       --model-type GPT \
       --load-model-type hf \
       --save-model-type mg \
       --target-tensor-parallel-size "${TP_SIZE}" \
       --target-pipeline-parallel-size "${PP_SIZE}" \
       --load-dir "${LOAD_DIR}" \
       --save-dir "${SAVE_DIR}" \
       --tokenizer-model "${LOAD_DIR%/}/tokenizer.json" \
       --model-type-hf qwen3 \
       --params-dtype bf16 \
       --spec mindspeed_llm.tasks.models.spec.qwen3_spec layer_spec