#!/bin/bash
# Convert a HuggingFace Llama-2-7b checkpoint to Megatron-Core (mcore) format.
#
# Reads:   ./model_from_hf/Llama-2-7b-hf/           (HF weights + tokenizer.model)
# Writes:  ./model_weights/Llama-2-7b/mcore/${TP}x${PP}
#
# Alternative flags kept for reference (unused in this configuration):
#   --target-tensor-parallel-size 2
#   --target-pipeline-parallel-size 4
#   --num-layer-list 8,8,8,8

# Abort immediately if the environment setup or the conversion fails,
# and treat unset variables as errors.
set -euo pipefail

# Ascend CANN toolkit environment; must succeed before conversion runs.
source /usr/local/Ascend/ascend-toolkit/set_env.sh

# Target parallelism layout for the converted checkpoint.
export TP=1   # tensor parallel size
export PP=4   # pipeline parallel size

python convert_ckpt.py \
    --model-type GPT \
    --target-tensor-parallel-size "${TP}" \
    --target-pipeline-parallel-size "${PP}" \
    --load-model-type hf \
    --save-model-type mg \
    --model-type-hf llama2 \
    --use-mcore-models \
    --load-dir ./model_from_hf/Llama-2-7b-hf/ \
    --save-dir "./model_weights/Llama-2-7b/mcore/${TP}x${PP}" \
    --tokenizer-model ./model_from_hf/Llama-2-7b-hf/tokenizer.model