#!/bin/bash
# Multi-node launcher: configures NCCL networking, waits for a rank-0
# rendezvous file on the shared filesystem, then starts distributed training.

# One-time dependency note (not executed):
# DS_BUILD_FUSED_ADAM=1 pip install -U deepspeed==0.6.5

# NIC used for NCCL's socket/bootstrap traffic (site-specific; assumes all
# nodes name their interface eth0 — TODO confirm on new clusters).
export NCCL_SOCKET_IFNAME=eth0
# 0 = allow InfiniBand transport (1 would force TCP sockets).
export NCCL_IB_DISABLE=0
# Enable GPU Direct RDMA over IB when supported.
export NCCL_IB_CUDA_SUPPORT=1
# GID index 0 on the IB port; RoCE setups often need a different index.
export NCCL_IB_GID_INDEX=0
# Pin the HCA to use; see the commented auto-detection alternative below.
export NCCL_IB_HCA=mlx5_2
# export NCCL_IB_HCA=$(pushd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; do cat $i/ports/1/gid_attrs/types/* 2>/dev/null | grep v >/dev/null && echo $i ; done; popd > /dev/null)
# Verbose NCCL logging — useful for debugging rendezvous/transport issues.
export NCCL_DEBUG=info
export OMP_NUM_THREADS=4
# #############################################
#############################################
# Fall back to single-node defaults when the rlaunch scheduler does not
# provide replica information (e.g. manual/local runs). ${VAR:=default}
# covers both unset and empty, matching the original -z checks.
: "${RLAUNCH_REPLICA_TOTAL:=1}"
export RLAUNCH_REPLICA_TOTAL
: "${RLAUNCH_REPLICA:=0}"
export RLAUNCH_REPLICA
#############################################

# Rank 0 publishes its IPv4 address on the shared filesystem so the other
# replicas can rendezvous. head -n 1 guards against multi-homed interfaces
# (the original could write several lines into master_ip, corrupting
# MASTER_ADDR); sed strips the "addr:" prefix emitted by older net-tools
# ifconfig, where $2 is "addr:10.0.0.1" rather than the bare address.
if [[ "$RLAUNCH_REPLICA" == "0" ]]; then
	ifconfig "$NCCL_SOCKET_IFNAME" | grep inet | grep -v inet6 \
		| awk '{print $2}' | sed 's/^addr://' | head -n 1 > master_ip
fi

# Remove the rendezvous file on any exit path (normal exit, Ctrl-C, kill)
# so a stale IP never lingers on the shared filesystem for the next run.
finish() {
	rm -rf -- master_ip
}

trap finish EXIT INT TERM

# Wait for rank 0 to publish its address. Bounded: if rank 0 dies before
# writing master_ip, the original loop spun forever; we now give up after
# ~5 minutes so the job fails visibly instead of hanging.
# The bare `ls` forces a directory re-read so shared-filesystem (NFS-style)
# attribute caches notice the newly created file — presumably why it was
# here originally; keep it.
wait_secs=0
while [ ! -f master_ip ]; do
	if [ "$wait_secs" -ge 300 ]; then
		echo "timed out waiting for master_ip" >&2
		exit 1
	fi
	echo "wait master_ip..."
	ls > /dev/null && sleep 1
	wait_secs=$((wait_secs + 1))
done

# Split assignment from export so a cat failure is not masked (SC2155).
MASTER_ADDR=$(cat master_ip)
export MASTER_ADDR
echo "master_ip: $MASTER_ADDR"


# Launch from the training source tree. Abort if the shared mount is
# missing — otherwise the rm and the launch below would run from whatever
# directory we happened to be in.
cd /sharefs/baai-mrnd/liuguang/FlagStudio/text-img-model/src || exit 1
# -f: stay quiet and succeed when a previous run's log dir is already gone.
rm -rf logs/ViT-L-14-XMLR-L-eval-only
        # --dataset-type="json" \
        # --train-num-samples 2013329 \
        # --train-data="/sharefs/baai-mrnd/liuguang/FlagStudio/nb/laion-6+/wudaomm_laion_2m_clip.json"  \
        # --dataset-resampled \
# sudo -E preserves the exported NCCL_*/OMP_NUM_THREADS environment; plain
# sudo typically resets the environment (env_reset), so the training
# process would silently lose the NCCL configuration set above.
sudo -E python3 -m torch.distributed.launch --nproc_per_node=1 \
        --nnodes="$RLAUNCH_REPLICA_TOTAL" --node_rank="$RLAUNCH_REPLICA" \
        --master_addr="$MASTER_ADDR" --master_port=12355 --use_env \
        training/main_deepspeed.py \
        --save-frequency 1 \
        --zeroshot-frequency 10 \
        --report-to="tensorboard" \
        --wandb-notes="laion400m_baai" \
        --imagenet-val="/sharefs/baai-mrnd/datasets/imagenet2012/val/" \
        --warmup 2000 \
        --batch-size=256 \
        --epochs=80 \
        --lr=1e-5 \
        --wd=0.05 \
        --norm_gradient_clip=5.0 \
        --workers=4 \
        --model ViT-L-14 \
        --name='ViT-L-14-XMLR-L-eval-only' \
        --seed 3407 \
        --text-distil='xlmr-large' \
        --gather-with-grad \
        --grad-checkpointing \
        --lock-image \
        --env_type="pytorchDDP"