

#!/usr/bin/env bash
# Launch distributed training of main.py on two GPUs in the background,
# redirecting all output to processing.log.
#
# Usage: bash run.sh
#
# History (previously used variants, kept for reference):
#   single-GPU:   CUDA_VISIBLE_DEVICES=6 python -u main.py --batch_size 8
#   torchrun:     torchrun --nproc-per-node=2 --master_port='1905' main.py --batch_size 5
#   4-GPU launch: python -m torch.distributed.launch --nproc_per_node=4 --use_env main.py
set -euo pipefail

# Restrict the job to physical GPUs 2 and 4 — one worker process per GPU
# (matches --nproc_per_node=2 below).
export CUDA_VISIBLE_DEVICES=2,4

# NOTE(review): torch.distributed.launch is deprecated in recent PyTorch in
# favor of torchrun (which implies --use_env). Kept as-is to preserve current
# behavior — confirm the installed PyTorch version before migrating, e.g.:
#   torchrun --nproc_per_node=2 --master_port=1905 main.py --batch_size 5
nohup python -m torch.distributed.launch \
  --nproc_per_node=2 \
  --use_env \
  --master_port='1905' \
  main.py --batch_size 5 > processing.log 2>&1 &

# Capture and report the launcher PID so the run can be monitored or killed
# later (nohup detaches it from this shell's hangup signal).
printf 'Launched training (PID %d); run "tail -f processing.log" to follow.\n' "$!"