Tristan committed on
Commit e689af1
1 parent: 36ae1c1

Training in progress, epoch 1

config.json CHANGED
@@ -23,7 +23,7 @@
   "rotary_pct": 0.25,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.39.3",
+  "transformers_version": "4.40.2",
   "use_cache": true,
   "use_parallel_residual": true,
   "vocab_size": 50304
eval_job_output.txt CHANGED
@@ -1,4 +1,4 @@
-slurm submission log: 2024-05-11 22:52:02.402564
+slurm submission log: 2024-05-20 10:11:59.906526
 created following sbatch script:
 
 ###############################
@@ -7,24 +7,24 @@ created following sbatch script:
 
 #SBATCH --account=nlp
 #SBATCH --cpus-per-task=16
-#SBATCH --dependency=afterok:
+#SBATCH --dependency=afterok:7635790
 #SBATCH --gres=gpu:1
-#SBATCH --job-name=tthrush-job-4888498
+#SBATCH --job-name=tthrush-job-1011450
 #SBATCH --mem=60G
-#SBATCH --nodelist=sphinx2
+#SBATCH --nodelist=sphinx1
 #SBATCH --open-mode=append
-#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/eval_job_output.txt
+#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/70m_llms_more_data_test_normalized/pythia-70m_default_1/eval_job_output.txt
 #SBATCH --partition=sphinx
 #SBATCH --time=14-0
 
 # activate your desired anaconda environment
-. /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
+. /nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
 
 # cd to working directory
 cd .
 
 # launch commands
-srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/perf'
+srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/70m_llms_more_data_test_normalized/pythia-70m_default_1,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy,default --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/70m_llms_more_data_test_normalized/pythia-70m_default_1/perf'
 
 ###############################
 
@@ -34,91 +34,7 @@ submission to slurm complete!
 ###############################
 slurm submission output
 
-
-
-sbatch: error: Batch job submission failed: Job dependency problem
-
-###############################
-
-slurm submission log: 2024-05-11 22:53:20.065335
-created following sbatch script:
-
-###############################
-
-#!/bin/bash
-
-#SBATCH --account=nlp
-#SBATCH --cpus-per-task=16
-#SBATCH --dependency=afterok:7599822
-#SBATCH --gres=gpu:1
-#SBATCH --job-name=tthrush-job-2562954
-#SBATCH --mem=60G
-#SBATCH --nodelist=sphinx2
-#SBATCH --open-mode=append
-#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/eval_job_output.txt
-#SBATCH --partition=sphinx
-#SBATCH --time=14-0
-
-# activate your desired anaconda environment
-. /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
-
-# cd to working directory
-cd .
-
-# launch commands
-srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/perf'
-
-###############################
-
-submission to slurm complete!
-
-
-###############################
-slurm submission output
-
-Submitted batch job 7599823
-
-
-
-###############################
-
-slurm submission log: 2024-05-11 23:09:48.287855
-created following sbatch script:
-
-###############################
-
-#!/bin/bash
-
-#SBATCH --account=nlp
-#SBATCH --cpus-per-task=16
-#SBATCH --dependency=afterok:7599868
-#SBATCH --gres=gpu:1
-#SBATCH --job-name=tthrush-job-4073620
-#SBATCH --mem=60G
-#SBATCH --nodelist=sphinx2
-#SBATCH --open-mode=append
-#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/eval_job_output.txt
-#SBATCH --partition=sphinx
-#SBATCH --time=14-0
-
-# activate your desired anaconda environment
-. /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
-
-# cd to working directory
-cd .
-
-# launch commands
-srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_5/pythia-70m_default/perf'
-
-###############################
-
-submission to slurm complete!
-
-
-###############################
-slurm submission output
-
-Submitted batch job 7599869
+Submitted batch job 7635791
 
 
 
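
The removed portion of eval_job_output.txt recorded a submission that failed with "Batch job submission failed: Job dependency problem" (its --dependency=afterok: flag had no job ID) followed by two resubmissions; the new log is a single submission chained to training job 7635790 and accepted as job 7635791. A minimal sketch of that chaining pattern, with hypothetical script names not taken from this repo:

    # Sketch only: submit training first, then make evaluation wait for a clean exit.
    # train_job.sbatch and eval_job.sbatch are hypothetical placeholders.
    TRAIN_JOB_ID=$(sbatch --parsable train_job.sbatch)
    sbatch --dependency=afterok:"$TRAIN_JOB_ID" eval_job.sbatch
    # An empty or invalid job ID here is rejected by sbatch with
    # "Batch job submission failed: Job dependency problem", as in the removed log.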
logs/events.out.tfevents.1716245528.sphinx2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b4de350dcd464831a416cf5a86a0b0f2b7fd0003923c3a64438cc31390713a6
+size 95455
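
The new TensorBoard event file is tracked with Git LFS, so the commit adds only a pointer (spec version, oid, size) rather than the ~95 kB file itself. A minimal sketch, assuming a local clone with git-lfs installed, for fetching and verifying the actual file:

    # Sketch only, in a local clone of this repo with git-lfs installed:
    git lfs pull --include "logs/events.out.tfevents.1716245528.sphinx2"
    sha256sum logs/events.out.tfevents.1716245528.sphinx2   # should match the oid above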
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b951b996bf316cd5f1f805ff62c7a17ff38b1796cef1aa7ed93aab71b0e1ab3
+oid sha256:cf7b47385f852fe85e4a2837781a49a0e236a5e632a260688f4ae5c333f57f3a
 size 281715176
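
model.safetensors keeps the same size (281,715,176 bytes) but gets a new SHA-256, i.e. the same architecture with updated epoch-1 weights. A minimal sanity-load sketch, assuming a local clone with the LFS files pulled (the Hub repo id is not shown in this commit):

    # Sketch only: load the updated checkpoint from a local clone and count parameters.
    python - <<'EOF'
    from transformers import AutoModelForCausalLM
    model = AutoModelForCausalLM.from_pretrained(".")  # "." = local clone of this repo
    print(f"parameters: {model.num_parameters():,}")
    EOF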
train_job_output.txt CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6cf3be7689dc496c68d9042ba7ef79c5166b2cf3d4a5717ebfc52a3a1503f499
-size 5112
+oid sha256:0433006cce1a247878d083c2265b5f8ae1fd4d1878e57fb6a9ba2f55d3ef8231
+size 5240
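
training_args.bin grows from 5112 to 5240 bytes; it is the pickled TrainingArguments object that the Trainer saves next to the checkpoint, so only the recorded arguments changed, not any weights. A minimal inspection sketch, assuming a local clone and a matching transformers/torch install (the file is a pickle, so only load it from a trusted source):

    # Sketch only: peek at the saved TrainingArguments from a local clone.
    python - <<'EOF'
    import torch
    args = torch.load("training_args.bin", weights_only=False)  # pickled TrainingArguments
    print(type(args).__name__)
    print("epochs:", args.num_train_epochs, "| lr:", args.learning_rate)
    EOF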