. path.sh
# export CUDA_VISIBLE_DEVICES="0, 1, 2, 3, 4, 5, 6, 7"
# NOTE: the device list must be comma-separated with NO spaces — some CUDA
# runtime versions stop parsing at the first space, leaving only GPU 0 visible.
export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
#export CUDA_VISIBLE_DEVICES="0,1,2,3"
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_LAUNCH_TIMEOUT=30
export NCCL_SOCKET_IFNAME=eth1
export NCCL_IB_GID_INDEX=3
# Discover the RoCE v2 HCA:port for this host from show_gids output.
# `hostname -I` may print several space-separated addresses; unquoted it would
# expand into multiple grep arguments (2nd+ IPs treated as FILE names).
# Quote the substitution and match on the first address only.
nccl_ib_hca=$(bash show_gids | grep "$(hostname -I | awk '{print $1}')" | grep v2 | awk '{print $1 ":" $2}')
# Leading "=" asks NCCL for an exact-name match on the HCA.
export NCCL_IB_HCA="=$nccl_ib_hca"
export NCCL_IB_SL=3
export NCCL_CHECKS_DISABLE=1

#export NCCL_CHECK_DISABLE=1
export NCCL_P2P_DISABLE=0
export NCCL_IB_DISABLE=1
# export NCCL_DEBUG=INFO
export NCCL_LL_THRESHOLD=16384
#export NCCL_IB_CUDA_SUPPORT=1
export NCCL_TIMEOUT=100
# export NVTE_APPLY_QK_LAYER_SCALING=1

# export CUDA_LAUNCH_BLOCKING=1

TOKENIZER_TYPE=GPT2BPETokenizer

# HuggingFace checkpoint dir; only the BPE merges/vocab files are read here.
HF_LLAMA_PATH=/apdcephfs/private_kaixunhuang/data/pretrained_models/Qwen/Qwen2.5-Omni-7B
merge_path=$HF_LLAMA_PATH/merges.txt
vocab_path=$HF_LLAMA_PATH/vocab.json


GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=12324
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))


# Parallelism: tensor / pipeline / context parallel sizes.
TP=4
PP=1
CP=1
MICRO_BATCH_SIZE=2
# GLOBAL_BATCH_SIZE=80
GLOBAL_BATCH_SIZE=2880
SEQ_LENGTH=1024
#SAVE_PREFIX=audiollm_whisper_tiny_qwen_0.5b_aishell1_wd5e-4
# SAVE_PREFIX=debug
SAVE_PREFIX=qwen2_5omni_thinker_train_v0.21_test

DATA_PATH=/apdcephfs/private_kaixunhuang/data/chat/train_set/omni_train_2025_7-17/train_test.jsonl
VALID_DATA_PATH=/apdcephfs/private_kaixunhuang/data/chat/train_set/omni_train_2025_7-17/train_test.jsonl
DIR=/apdcephfs/private_kaixunhuang/workspace/metagron-lm-meeting/exp

# Allows overriding any of the variables above from the command line.
. utils/parse_options.sh

# The MBZ component now tracks MICRO_BATCH_SIZE instead of the stale
# hard-coded "8" (this script sets MICRO_BATCH_SIZE=2). NOTE(review): this
# renames the experiment directory relative to runs saved under "_MBZ8_".
SAVE_PATH=${DIR}/${SAVE_PREFIX}_TP${TP}_PP${PP}_CP${CP}_MBZ${MICRO_BATCH_SIZE}_GBSZ${GLOBAL_BATCH_SIZE}_seq${SEQ_LENGTH}

# NOTE(review): GPU_NAME is never assigned in this script — presumably it is
# exported by the caller or set through utils/parse_options.sh. If it is
# unset, the else branch (eth1 / TCP-only) is always taken; confirm.
if [[ "${GPU_NAME^^}" == *"H20"* ]]; then
    echo "检测到 GPU 型号: $GPU_NAME，指定bond1网卡..."


    NET_TYPE="high"
    # # Force use of the bond1 interface
    # export NCCL_SOCKET_IFNAME=bond1
    # export NCCL_IB_HCA=mlx5_0,mlx5_1  # explicitly select the RDMA devices

    # # Special tuning for the 200Gbps network
    # export NCCL_NET_GDR_LEVEL=PHB
    # export NCCL_SHM_DISABLE=1
    # export NCCL_P2P_DISABLE=0
    # export NCCL_NET_SHARED_COMMS=0

    # # Tencent-cloud RDMA specific parameters
    # export NCCL_IB_TIMEOUT=22
    # export NCCL_IB_RETRY_CNT=7
    # export NCCL_IB_QPS_PER_CONNECTION=4

    # # Debug parameters
    # # export NCCL_DEBUG=INFO
    # export NCCL_DEBUG_SUBSYS=INIT,NET,ENV
    # # add before the launch command
    # export NCCL_IB_DISABLE=1  # force TCP instead of RDMA
    # export NCCL_SOCKET_IFNAME=bond1  # explicitly select the network interface

    # export NCCL_SOCKET_IFNAME=bond1       # force the bond1 interface
    # export NCCL_IB_DISABLE=1              # disable RDMA (if the problem persists)


    # # TCP protocol-stack tuning
    # export NCCL_PROTO=Simple       # more efficient for medium-sized models
    # export NCCL_ALGO=Tree          # tree algorithm suits multi-node better
    # export NCCL_SOCKET_NTHREADS=16  # increase socket thread count
    # export NCCL_NSOCKS_PERTHREAD=4 # sockets per thread

    # # Avoid shared-memory contention
    # export NCCL_SHM_USE_CUDA_MEMCPY=1

    # # Needed even when IB is disabled
    # export NCCL_NET_GDR_READ=0      # disable GPU Direct RDMA reads
    # export NCCL_IGNORE_CPU_AFFINITY=1 # avoid CPU-affinity interference
    # export NCCL_LAUNCH_MODE=GROUP   # grouped launch reduces latency

    # export NCCL_DEBUG=INFO
    # NORMAL parameters
    # export NCCL_IB_GID_INDEX=3
    # export NCCL_IB_SL=3
    # export NCCL_CHECKS_DISABLE=1
    # export NCCL_P2P_DISABLE=0
    # export NCCL_IB_DISABLE=0
    # export NCCL_LL_THRESHOLD=16384
    # export NCCL_IB_CUDA_SUPPORT=1
    # export NCCL_SOCKET_IFNAME=bond1
    # export UCX_NET_DEVICES=bond1
    # export NCCL_IB_HCA=mlx5_bond_1,mlx5_bond_5,mlx5_bond_3,mlx5_bond_7,mlx5_bond_4,mlx5_bond_8,mlx5_bond_2,mlx5_bond_6
    # export NCCL_COLLNET_ENABLE=0
    # export SHARP_COLL_ENABLE_SAT=0
    # export NCCL_NET_GDR_LEVEL=2
    # export NCCL_IB_QPS_PER_CONNECTION=4
    # export NCCL_IB_TC=160
    # export NCCL_PXN_DISABLE=1

    # TCCL parameters
    # export NCCL_DEBUG=INFO
    export NCCL_IB_GID_INDEX=3
    export NCCL_IB_SL=3
    # Fixed typo: was NCCL_CHECK_DISABLE, which NCCL ignores; the documented
    # variable (also used at the top of this script) is NCCL_CHECKS_DISABLE.
    export NCCL_CHECKS_DISABLE=1
    export NCCL_P2P_DISABLE=0
    export NCCL_IB_DISABLE=0
    export NCCL_LL_THRESHOLD=16384
    export NCCL_IB_CUDA_SUPPORT=1
    export NCCL_SOCKET_IFNAME=bond1
    export UCX_NET_DEVICES=bond1
    export NCCL_IB_HCA=mlx5_bond_1,mlx5_bond_5,mlx5_bond_3,mlx5_bond_7,mlx5_bond_4,mlx5_bond_8,mlx5_bond_2,mlx5_bond_6
    export NCCL_COLLNET_ENABLE=0
    export SHARP_COLL_ENABLE_SAT=0
    export NCCL_NET_GDR_LEVEL=2
    export NCCL_IB_QPS_PER_CONNECTION=4
    export NCCL_IB_TC=160
    export NCCL_PXN_DISABLE=0
    export NCCL_NVLS_ENABLE=0 # disable SHARP/NVLS
    export NCCL_MPI_PROFILE_PRIMS_ENABLE=1 # enable MPI primitive-profiling logs

    # export NCCL_SOCKET_IFNAME=bond1
    # export NCCL_IB_DISABLE=1
    # export NCCL_NET_GDR_READ=1
    # export NCCL_IB_MTU=4096
    # export NCCL_BUFFSIZE=4194304
    # export NCCL_NSOCKS_PERTHREAD=8
    # export NCCL_MIN_CHANNELS=8
    # export NCCL_MAX_CHANNELS=64
    # export NCCL_CROSS_NIC=0

else
    export NCCL_SOCKET_IFNAME=eth1
    export NCCL_IB_GID_INDEX=3
    # Was "mlx5_2:1,mlx5_2:1" — the same HCA listed twice; duplicate removed.
    # NOTE(review): verify a second device (e.g. mlx5_3:1) wasn't intended.
    export NCCL_IB_HCA=mlx5_2:1
    export NCCL_IB_SL=3
    # Fixed typo: was NCCL_CHECK_DISABLE (ignored by NCCL).
    export NCCL_CHECKS_DISABLE=1
    export NCCL_P2P_DISABLE=0
    export NCCL_LL_THRESHOLD=16384
    export NCCL_IB_CUDA_SUPPORT=1

    # export NCCL_DEBUG=INFO
    export NCCL_PORT_RANGE="34000-35000"
    # IB disabled in this branch, so the NCCL_IB_* settings above are inert.
    export NCCL_IB_DISABLE=1
    export NCCL_SHM_DISABLE=0
fi

tensorboard_output=$SAVE_PATH/tensorboard

mkdir -p "$SAVE_PATH/tensorboard"
CHECKPOINT_PATH=/apdcephfs/private_kaixunhuang/data/pretrained_models/megatron/qwen2-5omni-8b-megatron-TP${TP}-PP${PP}-TE
# If SAVE_PATH has no iter_0000001 yet, seed it from the pretrained Megatron
# checkpoint. Only node rank 0 copies, to avoid concurrent multi-node copies.
if [ ! -d "$SAVE_PATH/iter_0000001" ] && [ "$NODE_RANK" -eq 0 ]; then
    echo "$SAVE_PATH/iter_0000001 not found, copying from $CHECKPOINT_PATH"
    # Quote expansions so paths with spaces survive word-splitting.
    cp -r "$CHECKPOINT_PATH"/* "$SAVE_PATH"/
fi

# Snapshot this launch script next to the experiment for reproducibility.
cp "$0" "$SAVE_PATH"/

# torchrun rendezvous/topology flags. Kept as a whitespace-joined string and
# expanded UNQUOTED at the call site so it splits into separate arguments.
DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --node_rank $NODE_RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT
"

# Qwen2.5-Omni-7B thinker architecture: 28 layers, hidden 3584, FFN 18944,
# 28 attention heads with GQA (4 KV groups), SwiGLU, RMSNorm, RoPE base 1e6,
# untied input/output embeddings, QKV bias, no linear bias.
MODEL_ARGS="
    --ffn-hidden-size 18944 \
    --num-layers 28 \
    --hidden-size 3584 \
    --num-attention-heads 28 \
    --group-query-attention \
    --num-query-groups 4 \
    --norm-epsilon 1e-5 \
    --seq-length $SEQ_LENGTH \
    --rotary-base 1000000 \
    --max-position-embeddings $SEQ_LENGTH \
    --attention-dropout 0 \
    --hidden-dropout 0 \
    --use-rotary-position-embeddings \
    --normalization RMSNorm \
    --no-position-embedding \
    --disable-bias-linear \
    --swiglu \
    --attention-softmax-in-fp32 \
    --untie-embeddings-and-output-weights \
    --make-vocab-size-divisible-by $((152064 / TP)) \
    --add-qkv-bias
"

# Logging / checkpoint cadence and tensorboard destination.
OUTPUT_ARGS="
    --log-interval 1 \
    --save-interval 200 \
    --eval-interval 100 \
    --log-throughput \
    --log-memory-to-tensorboard \
    --log-timers-to-tensorboard \
    --tensorboard-dir $tensorboard_output \
    --eval-iters 5
"


    #--make-vocab-size-divisible-by 156032 \
# torchrun $DISTRIBUTED_ARGS megatron/core/models/multimodal/penguins_llm.py \
# Launch the finetuning job. $DISTRIBUTED_ARGS/$MODEL_ARGS/$OUTPUT_ARGS are
# deliberately unquoted so they word-split into individual flags.
# NOTE(review): --load and --save both point at $SAVE_PATH; combined with the
# iter_0000001 seeding above, training resumes from the latest checkpoint in
# SAVE_PATH (weights only: --no-load-optim / --no-load-rng skip optimizer and
# RNG state). The --whisper-* flags describe the Whisper audio encoder branch.
torchrun $DISTRIBUTED_ARGS examples/audiollm/finetune_qwen2_5omni_thinker.py \
    $MODEL_ARGS \
    $OUTPUT_ARGS \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --tensor-model-parallel-size $TP \
    --pipeline-model-parallel-size $PP \
    --context-parallel-size $CP \
    --extra-equivalent-layers 16 \
    --lr-warmup-iters 200 \
    --lr 1e-5 \
    --min-lr 2e-6 \
    --lr-decay-iters 9000 \
    --lr-decay-style cosine \
    --weight-decay 5e-4 \
    --bf16 \
    --ckpt-format torch \
    --train-iters 100000 \
    --tokenizer-type $TOKENIZER_TYPE \
    --vocab-file $vocab_path \
    --merge-file $merge_path \
    --recompute-activations \
    --recompute-granularity selective \
    --use-distributed-optimizer \
    --optimizer hybridadam \
    --save $SAVE_PATH \
    --dataloader-type external \
    --num-workers 32 \
    --load $SAVE_PATH \
    --no-load-optim \
    --no-load-rng \
   --whisper-conv1-out-dim 1280 \
   --whisper-hidden-size 1280 \
   --whisper-num-attention-heads 20 \
   --whisper-num-layers 32 \
   --whisper-input-dim 128 \
    --train-data-path $DATA_PATH \
    --valid-data-path $VALID_DATA_PATH 
    
    
    
    # \
    # --freeze-llm \
    # --freeze-whisper




    #--sequence-parallel \

        # --train-data-path /apdcephfs_qy3/share_976139/users/joyounglv/audiollama/data/aishell1/train_asr_aishell1.json \
    # --load ./tmp_whisper_noinit
    # --load whisper-large-v3-megatron-TP${TP}-TE