Tristan committed on
Commit d1976d3
1 Parent(s): 9b14cd7

Training in progress, epoch 0

eval_job_output.txt CHANGED
@@ -1,4 +1,4 @@
- slurm submission log: 2024-05-26 22:30:10.163518
+ slurm submission log: 2024-05-27 12:34:01.996971
created following sbatch script:

###############################
@@ -7,13 +7,13 @@ created following sbatch script:

#SBATCH --account=nlp
#SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653550
+ #SBATCH --dependency=afterok:7656642
#SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-4835795
+ #SBATCH --job-name=tthrush-job-3294461
#SBATCH --mem=60G
#SBATCH --nodelist=sphinx1
#SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/eval_job_output.txt
#SBATCH --partition=sphinx
#SBATCH --time=14-0

@@ -24,7 +24,7 @@ created following sbatch script:
cd .

# launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/perf'

###############################

@@ -34,13 +34,13 @@ submission to slurm complete!
###############################
slurm submission output

- Submitted batch job 7653551
+ Submitted batch job 7656643



###############################

- slurm submission log: 2024-05-26 22:32:51.779115
+ slurm submission log: 2024-05-27 23:18:48.257074
created following sbatch script:

###############################
@@ -49,13 +49,13 @@ created following sbatch script:

#SBATCH --account=nlp
#SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653580
+ #SBATCH --dependency=afterok:7659748
#SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-1247637
+ #SBATCH --job-name=tthrush-job-4032027
#SBATCH --mem=60G
#SBATCH --nodelist=sphinx1
#SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/eval_job_output.txt
#SBATCH --partition=sphinx
#SBATCH --time=14-0

@@ -66,7 +66,7 @@ created following sbatch script:
cd .

# launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/perf'

###############################

@@ -76,13 +76,13 @@ submission to slurm complete!
###############################
slurm submission output

- Submitted batch job 7653581
+ Submitted batch job 7659749



###############################

- slurm submission log: 2024-05-26 22:58:04.194573
+ slurm submission log: 2024-05-27 23:24:09.065268
created following sbatch script:

###############################
@@ -91,13 +91,13 @@ created following sbatch script:

#SBATCH --account=nlp
#SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653635
+ #SBATCH --dependency=afterok:7659790
#SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-3480785
+ #SBATCH --job-name=tthrush-job-1491095
#SBATCH --mem=60G
#SBATCH --nodelist=sphinx1
#SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/eval_job_output.txt
#SBATCH --partition=sphinx
#SBATCH --time=14-0

@@ -108,7 +108,7 @@ created following sbatch script:
cd .

# launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_default_1/perf'

###############################

@@ -118,49 +118,7 @@ submission to slurm complete!
###############################
slurm submission output

- Submitted batch job 7653636
-
-
-
- ###############################
-
- slurm submission log: 2024-05-26 23:16:37.954366
- created following sbatch script:
-
- ###############################
-
- #!/bin/bash
-
- #SBATCH --account=nlp
- #SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653692
- #SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-1152535
- #SBATCH --mem=60G
- #SBATCH --nodelist=sphinx1
- #SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/eval_job_output.txt
- #SBATCH --partition=sphinx
- #SBATCH --time=14-0
-
- # activate your desired anaconda environment
- . /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
-
- # cd to working directory
- cd .
-
- # launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_default_1/perf'
-
- ###############################
-
- submission to slurm complete!
-
-
- ###############################
- slurm submission output
-
- Submitted batch job 7653693
+ Submitted batch job 7659791



logs/events.out.tfevents.1716893914.sphinx2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d4bdbfbe5761d7056e13b715344a777583501e6d625796459f1236c5881dcc
+ size 11766
model.safetensors CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
- oid sha256:3a5094de8742d93ef1f8a47b5f1b0b0a47f72cbb3b81db632dd87ef0b0eeb980
+ oid sha256:0b6eea337fa1f3d6e834370139d0769d4a0bb08ecacb7a2eb27fc5db7c7b7954
size 281715176
train_job_output.txt CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
- oid sha256:3d6d46f556e1340581a47698b7ca35dfaa15506d90afacd53b2991f3ff760a32
- size 5176
+ oid sha256:27bc4d89b732f28d40be81b5f6e283ed05e3aac6fb6fe01e1e2e7fe7f9b49605
+ size 5240