Training in progress, epoch 1
- config.json +1 -1
- eval_job_output.txt +6 -6
- logs/events.out.tfevents.1715991452.sphinx2 +3 -0
- model.safetensors +1 -1
- train_job_output.txt +0 -0
- training_args.bin +2 -2
config.json
CHANGED
@@ -23,7 +23,7 @@
   "rotary_pct": 0.25,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.40.2",
   "use_cache": true,
   "use_parallel_residual": true,
   "vocab_size": 50304
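The only change to config.json is the transformers_version stamp (now 4.40.2); the surrounding GPTNeoX/Pythia-style fields are untouched. A minimal sketch, assuming a local checkout of this repository at ./pythia-70m_piqa (path hypothetical), of reading the updated config with transformers:

# Minimal sketch: AutoConfig reads config.json directly; the local path below is hypothetical.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./pythia-70m_piqa")
print(config.transformers_version)            # "4.40.2" after this commit
print(config.rotary_pct, config.vocab_size)   # 0.25, 50304 (unchanged)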
eval_job_output.txt
CHANGED
@@ -1,4 +1,4 @@
-slurm submission log: 2024-05-
+slurm submission log: 2024-05-17 11:42:19.909263
 created following sbatch script:
 
 ###############################
@@ -7,13 +7,13 @@ created following sbatch script:
 
 #SBATCH --account=nlp
 #SBATCH --cpus-per-task=16
-#SBATCH --dependency=afterok:
+#SBATCH --dependency=afterok:7625429
 #SBATCH --gres=gpu:1
-#SBATCH --job-name=tthrush-job-
+#SBATCH --job-name=tthrush-job-3702838
 #SBATCH --mem=60G
 #SBATCH --nodelist=sphinx2
 #SBATCH --open-mode=append
-#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/
+#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_more_data/pythia-70m_piqa/eval_job_output.txt
 #SBATCH --partition=sphinx
 #SBATCH --time=14-0
 
@@ -24,7 +24,7 @@ created following sbatch script:
 cd .
 
 # launch commands
-srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/
+srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_more_data/pythia-70m_piqa,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms_more_data/pythia-70m_piqa/perf'
 
 ###############################
 
@@ -34,7 +34,7 @@ submission to slurm complete!
 ###############################
 slurm submission output
 
-Submitted batch job
+Submitted batch job 7625430
 
 
 
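The sbatch script above wraps a single lm-evaluation-harness CLI call. A rough Python-API equivalent, assuming lm-evaluation-harness >= 0.4 and with the long cluster paths shortened to a hypothetical local checkout ./pythia-70m_piqa:

# Hedged sketch of the lm_eval call from the launch command above (API version and local path assumed).
import json
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=./pythia-70m_piqa,revision=main,dtype=float16,trust_remote_code=True",
    tasks=["xnli_en", "xnli_fr", "sciq", "piqa", "lambada", "arc_easy"],
    device="cuda",
)
print(json.dumps(results["results"], indent=2))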
logs/events.out.tfevents.1715991452.sphinx2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b1a05dbc500bc5ab1bda5cd149d7b36ab74c4fae2aba28ff2c9a7b6271bbba1
+size 95376
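The added file is a Git LFS pointer to a TensorBoard event log (about 95 kB) written on sphinx2. A small sketch, assuming the real file has been pulled locally (e.g. via git lfs pull), of dumping its scalar tags with TensorBoard's EventAccumulator:

# Sketch: read the scalars (e.g. training loss) from the LFS-backed event file once it is present locally.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("logs/events.out.tfevents.1715991452.sphinx2")
ea.Reload()
for tag in ea.Tags()["scalars"]:
    last = ea.Scalars(tag)[-1]
    print(tag, last.step, last.value)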
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1d81c43125e8d6cf5c649f2ee162fdd68b9c8a127e463f79f7f060659106d15c
 size 281715176
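model.safetensors is likewise an LFS pointer; the commit swaps in a new object hash while the size (281,715,176 bytes) stays the same. A sketch, assuming the weights have been pulled locally, of listing a few tensors with the safetensors library:

# Sketch: inspect the downloaded checkpoint (path assumed relative to the repo checkout).
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt") as f:
    for name in list(f.keys())[:5]:
        print(name, tuple(f.get_tensor(name).shape))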
train_job_output.txt
CHANGED
The diff for this file is too large to render; see the raw diff.
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f8393c460eb50590ae60427791c221a45176e7cf1659cd2b0b8665a63ce559b8
+size 5176
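training_args.bin is conventionally the pickled TrainingArguments object saved by transformers' Trainer. A sketch, assuming the LFS object has been pulled locally and that you trust the file (it is a pickle), of inspecting it:

# Sketch: deserialize the pickled TrainingArguments (weights_only=False is needed on newer torch and implies trusting the file).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)   # typically TrainingArguments
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)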