File size: 2,826 Bytes
2660fcd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
slurm submission log: 2024-05-09 22:15:10.985393
created following sbatch script: 

###############################

#!/bin/bash

#SBATCH --account=nlp
#SBATCH --cpus-per-task=16
#SBATCH --dependency=afterok:7593071
#SBATCH --gres=gpu:2
#SBATCH --job-name=tthrush-job-2376031
#SBATCH --mem=400G
#SBATCH --nodelist=sphinx2
#SBATCH --open-mode=append
#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_3/pythia-70m_sciq/train_job_output.txt
#SBATCH --partition=sphinx
#SBATCH --time=14-0

# activate your desired anaconda environment
. /nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection

# cd to working directory
cd .

# launch commands
srun --unbuffered run_as_child_processes 'torchrun --master_port 29502 --nproc_per_node=2 train_llm.py --dataset_id /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/train_data_3/sciq --output_dir /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_3/pythia-70m_sciq --output_hub_id pythia-70m_sciq --model_id EleutherAI/pythia-70m --num_train_epochs 1 --learning_rate 1e-3 --warmup_ratio=0.1 --gradient_accumulation_steps 2'

###############################

submission to slurm complete!


###############################
slurm submission output

Submitted batch job 7593072



###############################

slurm submission log: 2024-05-09 23:03:13.895695
created following sbatch script: 

###############################

#!/bin/bash

#SBATCH --account=nlp
#SBATCH --cpus-per-task=16
#SBATCH --dependency=afterok:7593149
#SBATCH --gres=gpu:2
#SBATCH --job-name=tthrush-job-3384549
#SBATCH --mem=400G
#SBATCH --nodelist=sphinx2
#SBATCH --open-mode=append
#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_3/pythia-70m_sciq/train_job_output.txt
#SBATCH --partition=sphinx
#SBATCH --time=14-0

# activate your desired anaconda environment
. /nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection

# cd to working directory
cd .

# launch commands
srun --unbuffered run_as_child_processes 'torchrun --master_port 29502 --nproc_per_node=2 train_llm.py --dataset_id /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/train_data_3/sciq --output_dir /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_3/pythia-70m_sciq --output_hub_id pythia-70m_sciq --model_id EleutherAI/pythia-70m --num_train_epochs 1 --learning_rate 1e-3 --warmup_ratio=0.1 --gradient_accumulation_steps 2'

###############################

submission to slurm complete!


###############################
slurm submission output

Submitted batch job 7593150



###############################