# Single-node training launcher for GCNet Mask R-CNN (R-50 FPN, r4 GCB c3-c5)
# on Ascend NPU. Reference 8-device command this script is derived from:
# PORT=29888 ./tools/dist_train.sh ./configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py 8 --npu-ids 0 --cfg-options optimizer.lr=0.02 --seed 0 --opt-level O1 --loss-scale 16.0

# Load the Ascend NPU toolkit environment (driver paths, library variables).
source ./env_npu.sh

# Do not mirror Ascend slog device logs to stdout.
export ASCEND_SLOG_PRINT_TO_STDOUT=0
# Ascend global log level 3 = ERROR only (suppresses DEBUG/INFO/WARNING noise).
export ASCEND_GLOBAL_LOG_LEVEL=3
# Enable the PTCopy fast path for host<->device tensor copies (perf switch).
export PTCOPY_ENABLE=1
# Dispatch host ops to the device through an async task queue (perf switch).
export TASK_QUEUE_ENABLE=1
# Treat ADD and MUL as dynamic-shape ops — NOTE(review): presumably avoids
# per-shape recompilation for these kernels; confirm against CANN docs.
export DYNAMIC_OP="ADD#MUL"
# Allow combined/fused execution of adjacent small ops.
export COMBINED_ENABLE=1
# Dynamic-shape compilation and experimental dynamic partitioning both off.
export DYNAMIC_COMPILE_ENABLE=0
export EXPERIMENTAL_DYNAMIC_PARTITION=0
# Do not collect Ascend event-level profiling logs.
export ASCEND_GLOBAL_EVENT_ENABLE=0
# Skip the HCCL communication whitelist check (single-node run).
export HCCL_WHITELIST_DISABLE=1

# One process in the job. NOTE(review): RANK_SIZE/WORLD_SIZE are 1 here even
# though the reference command above is an 8-device run and the non-ARM branch
# below writes to work_dirs8p2 with the 8p learning rate — this file looks like
# an 8p script trimmed to 1p; verify the intended device count before reuse.
export RANK_SIZE=1
export WORLD_SIZE=1

# Spawn one backgrounded training process per rank (RANK_SIZE total).
for (( RANK_ID = 0; RANK_ID < RANK_SIZE; RANK_ID++ ))
do
    # torch.distributed reads RANK from the environment (--launcher pytorch).
    export RANK=$RANK_ID

    if [[ "$(uname -m)" == "aarch64" ]]
    then
        # On ARM hosts, pin each rank to its own 24-core slice
        # [RANK_ID*24, RANK_ID*24 + 23] to avoid cross-rank CPU contention.
        a=$(( RANK_ID * 24 ))
        b=$(( a + 23 ))
        taskset -c "${a}-${b}" python ./tools/train.py ./configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py \
            --launcher pytorch \
            --work-dir work_dirs1p \
            --no-validate \
            --cfg-options \
            optimizer.lr=0.0025 \
            --seed 0 \
            --gpu-ids 0 \
            --loss-scale 128.0 \
            --opt-level O1 &
    else
        python ./tools/train.py ./configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py \
            --launcher pytorch \
            --work-dir work_dirs8p2 \
            --no-validate \
            --cfg-options \
            optimizer.lr=0.02 \
            --seed 0 \
            --gpu-ids 0 \
            --loss-scale 128.0 \
            --opt-level O1 &
    fi
done

# Block until every spawned rank finishes so the launcher's lifetime (and a
# Ctrl-C / job-scheduler view of it) tracks the actual training processes.
wait