#!/usr/bin/env bash
### Qwen2Audio Transform
# Converts a HuggingFace Qwen2-Audio checkpoint (Whisper-tiny audio encoder +
# Qwen 0.5B decoder) into Megatron format in three stages:
#   0) encoder conversion, 1) decoder conversion, 2) merge into one checkpoint.
set -euo pipefail

# HuggingFace checkpoint directory (holds both encoder and decoder weights).
HF_LLAMA_PATH="Qwen-audio-whisper-tiny-qwen-0.5B"

TP=1      # target tensor-parallel size
PP=1      # target pipeline-parallel size
stage=0   # first stage to run; stages >= this value are executed

# Megatron-format output directories for the converted encoder, decoder,
# and the merged final checkpoint.
MEGATRON_ENCODER_DIR="qwen2-audio-whisper-tiny-megatron-TP${TP}-TE-v1"
MEGATRON_FORMAT_DIR="/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/qwen2-audio-0.5B-decoder-megatron-TP${TP}-PP${PP}-TE"
MEGATRON_FINAL_DIR="/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/qwen2-audio-0.6b-instruct-megatron-TP${TP}-PP${PP}-TE"

# Stage 0: convert the HF Whisper audio encoder weights ("audio_tower.*")
# to Megatron tensor-parallel format under the "speech_model" prefix.
if [[ "$stage" -le 0 ]]; then
  python examples/audiollm/whisper_converter_v1.py \
    --load "$HF_LLAMA_PATH" \
    --save "$MEGATRON_ENCODER_DIR" \
    --tensor-parallel-size "$TP" \
    --load-prefix "audio_tower." \
    --save-prefix "speech_model"
fi



# Alternative decoder output dir, kept for reference:
# MEGATRON_FORMAT_DIR=/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/qwen2.5-7b-instruct-megatron-TP${TP}-PP${PP}-TE

# Stage 1: convert the HF Qwen decoder to Megatron mcore format
# (transformer_engine impl, bf16).
# NOTE(review): --model-size llama2-7B looks inconsistent with a 0.5B Qwen
# checkpoint — presumably the loader only uses it for layer-mapping defaults;
# verify against the qwen_te loader before changing.
if [[ "$stage" -le 1 ]]; then
  python tools/checkpoint/convert.py \
    --model-type GPT \
    --loader qwen_te \
    --saver mcore \
    --model-size llama2-7B \
    --megatron-path /apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM \
    --target-tensor-parallel-size "$TP" \
    --target-pipeline-parallel-size "$PP" \
    --checkpoint-type hf \
    --load-dir "$HF_LLAMA_PATH" \
    --save-dir "$MEGATRON_FORMAT_DIR" \
    --tokenizer-model "$HF_LLAMA_PATH" \
    --bf16 \
    --saver-transformer-impl transformer_engine \
    --loader-transformer-impl transformer_engine
fi

# Stage 2: merge the Megatron-format encoder and decoder checkpoints
# into one final checkpoint directory.
if [[ "$stage" -le 2 ]]; then
  python examples/audiollm/merge_qwen_audio_ckp.py \
    --encoder-dir "$MEGATRON_ENCODER_DIR" \
    --decoder-dir "$MEGATRON_FORMAT_DIR" \
    --final-dir "$MEGATRON_FINAL_DIR" \
    --tp "$TP" \
    --pp "$PP"
fi

