#!/bin/bash
# SLURM launcher: single-node distributed training of VMamba (vmambav2v_tiny_224)
# on ImageNet. One sbatch task spawns one torch.distributed worker per GPU.
#SBATCH -J VMamba
#SBATCH -o /home/xiaridehehe/ownProgram/ChangeViT/result/out/train.out.%j
#SBATCH -e /home/xiaridehehe/ownProgram/ChangeViT/result/err/train.err.%j
#SBATCH --partition=gpu-l20
#SBATCH --gres=gpu:8
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=64

# Fail fast: if environment activation fails, do not fall through and train
# in the wrong (base) environment.
set -eo pipefail

# `source` is a bashism — requires the bash shebang above, not /bin/sh.
source activate vmamba_ccj

# One worker process per allocated GPU. Previously hard-coded to 2 while the
# job requested --gres=gpu:8, leaving 6 GPUs idle; derive it from SLURM so the
# launch always matches the allocation (defaults to 8 outside SLURM).
nproc_per_node=${SLURM_GPUS_ON_NODE:-8}

# NOTE(review): torch.distributed.launch is deprecated in favor of `torchrun`,
# but switching changes how the local rank reaches main.py (env var vs
# --local_rank argv) — confirm main.py's parsing before migrating.
python -m torch.distributed.launch --nnodes=1 --node_rank=0 \
        --nproc_per_node="${nproc_per_node}" \
        --master_addr="127.0.0.1" --master_port=29505 main.py \
        --cfg configs/vssm/vmambav2v_tiny_224.yaml --batch-size 128 \
        --data-path /home/xiaridehehe/ownProgram/data_set/imageNet/ --output output/imageNet