#!/bin/bash
#SBATCH --job-name=inference
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1      # crucial - only 1 task per dist per node!
#SBATCH --cpus-per-task=24       # number of cores per tasks
#SBATCH --hint=nomultithread     # we get physical cores not logical
#SBATCH --gres=gpu:1             # number of gpus
#SBATCH --time 12:00:00          # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out       # output file name
#SBATCH --account=ajs@v100

# Slurm array job: each array task runs inference on a contiguous slice of
# the dataset. NOTE(review): no `#SBATCH --array=...` directive is present —
# presumably this is submitted as `sbatch --array=0-99 <script>`; without an
# array range $SLURM_ARRAY_TASK_ID is unset. Confirm the submission command.

# Trace every command (-x) and abort on the first failing command (-e).
set -x -e

# Load the shared project environment (paths, modules, python env).
source "$six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0"

echo "START TIME: $(date)"

# Total samples across the whole array, and the slice size per array task
# (3290900 / 32909 = 100 tasks). NUM_SAMPLES is kept for documentation.
NUM_SAMPLES=3290900
NUM_SAMPLES_PER_JOB=32909
#NUM_SAMPLES_PER_JOB=128

# Half-open range [TASK_START, TASK_END) owned by this array task.
TASK_START=$(( SLURM_ARRAY_TASK_ID * NUM_SAMPLES_PER_JOB ))
TASK_END=$(( TASK_START + NUM_SAMPLES_PER_JOB ))

echo "Starting task $SLURM_ARRAY_TASK_ID with start $TASK_START and end $TASK_END"

python inference_java.py --start "$TASK_START" --end "$TASK_END"