Commit bc8a855
Parent(s): f6fa826

Upload 52 files

- all_results.json +6 -6
- checkpoint-1000/config.json +39 -0
- checkpoint-1000/generation_config.json +6 -0
- checkpoint-1000/merges.txt +0 -0
- checkpoint-1000/model.safetensors +3 -0
- checkpoint-1000/optimizer.pt +3 -0
- checkpoint-1000/rng_state.pth +3 -0
- checkpoint-1000/scheduler.pt +3 -0
- checkpoint-1000/special_tokens_map.json +5 -0
- checkpoint-1000/tokenizer.json +0 -0
- checkpoint-1000/tokenizer_config.json +19 -0
- checkpoint-1000/trainer_state.json +35 -0
- checkpoint-1000/training_args.bin +3 -0
- checkpoint-1000/vocab.json +0 -0
- checkpoint-1500/config.json +39 -0
- checkpoint-1500/generation_config.json +6 -0
- checkpoint-1500/merges.txt +0 -0
- checkpoint-1500/model.safetensors +3 -0
- checkpoint-1500/optimizer.pt +3 -0
- checkpoint-1500/rng_state.pth +3 -0
- checkpoint-1500/scheduler.pt +3 -0
- checkpoint-1500/special_tokens_map.json +5 -0
- checkpoint-1500/tokenizer.json +0 -0
- checkpoint-1500/tokenizer_config.json +19 -0
- checkpoint-1500/trainer_state.json +42 -0
- checkpoint-1500/training_args.bin +3 -0
- checkpoint-1500/vocab.json +0 -0
- checkpoint-500/config.json +39 -0
- checkpoint-500/generation_config.json +6 -0
- checkpoint-500/merges.txt +0 -0
- checkpoint-500/model.safetensors +3 -0
- checkpoint-500/optimizer.pt +3 -0
- checkpoint-500/rng_state.pth +3 -0
- checkpoint-500/scheduler.pt +3 -0
- checkpoint-500/special_tokens_map.json +5 -0
- checkpoint-500/tokenizer.json +0 -0
- checkpoint-500/tokenizer_config.json +19 -0
- checkpoint-500/trainer_state.json +28 -0
- checkpoint-500/training_args.bin +3 -0
- checkpoint-500/vocab.json +0 -0
- model.safetensors +1 -1
- train_results.json +6 -6
- trainer_state.json +30 -9
- training_args.bin +1 -1
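
The upload adds three intermediate Trainer checkpoints (steps 500, 1000 and 1500) alongside the final GPT-2 weights, tokenizer files and training metrics at the repository root. A minimal loading sketch, assuming a hypothetical repository id "<user>/<repo>" (the actual id is not part of this commit view):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    repo_id = "<user>/<repo>"  # hypothetical; substitute the repo this commit belongs to

    # Final fine-tuned weights and tokenizer uploaded at the repo root.
    model = AutoModelForCausalLM.from_pretrained(repo_id)
    tokenizer = AutoTokenizer.from_pretrained(repo_id)

    # An intermediate checkpoint can be loaded from a local clone, e.g. step 1000:
    # model = AutoModelForCausalLM.from_pretrained("./checkpoint-1000")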
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
   "epoch": 20.0,
-  "total_flos":
-  "train_loss":
-  "train_runtime":
-  "train_samples":
-  "train_samples_per_second":
-  "train_steps_per_second": 2.
+  "total_flos": 1975367761920000.0,
+  "train_loss": 3.517263697574013,
+  "train_runtime": 756.053,
+  "train_samples": 189,
+  "train_samples_per_second": 5.0,
+  "train_steps_per_second": 2.513
 }
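
The updated throughput figures are internally consistent with the sample count and runtime; a quick arithmetic sketch, assuming the 1,900 total optimizer steps recorded in the trainer_state.json files below:

    train_samples = 189
    num_train_epochs = 20
    train_runtime = 756.053  # seconds

    print(train_samples * num_train_epochs / train_runtime)  # ~5.0   -> train_samples_per_second
    print(1900 / train_runtime)                               # ~2.513 -> train_steps_per_second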
checkpoint-1000/config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 50257
+}

checkpoint-1000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.40.1"
+}

checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4fd7a8bfdf47717916371809120058214f104b519f6d069a999a05f62796dfb
+size 497774208

checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:714b0631f10e027164d03a4cbd015ca1e3ade1d584b4c3ba26a6c5901e085ee0
+size 995642298

checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6735e66c381ae8e658c74ec6425d9f9bebcaa8d9244f2d8acd6a140ecc27fae5
+size 14244

checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd6455196257e514711e27db19cf7707d0d69acb0e59bfe5abb77b34a8fc6f34
+size 1064

checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,35 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 10.526315789473685,
+  "eval_steps": 500,
+  "global_step": 1000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 5.2631578947368425,
+      "grad_norm": 5.739169120788574,
+      "learning_rate": 3.6842105263157895e-05,
+      "loss": 4.1507,
+      "step": 500
+    },
+    {
+      "epoch": 10.526315789473685,
+      "grad_norm": 6.05218505859375,
+      "learning_rate": 2.368421052631579e-05,
+      "loss": 3.5677,
+      "step": 1000
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1900,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 1039942287360000.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}

checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c0c58500b75bc35ed88f4ea14b7b0b341100ad5032101fd651f3196728c575
+size 4984

checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-1500/config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 50257
+}

checkpoint-1500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.40.1"
+}

checkpoint-1500/merges.txt ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc2139ae9587189eb4c43194e35990c1f751f4cb06b5dda3838c81fca447ebe8
+size 497774208

checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54cc554636c599f6bd8e53d560827fb586a38066e395d85525de463e41c06c5d
+size 995642298

checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ccc5f358be43730bcf4e31825be22c38d78f2bdde04171326913bc3ae3dc977
+size 14244

checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e40ad999b8d4d3e2971940ff7846b0dcd3d1bd38df26532a701848e67ca2baeb
+size 1064

checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,42 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 15.789473684210526,
+  "eval_steps": 500,
+  "global_step": 1500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 5.2631578947368425,
+      "grad_norm": 5.739169120788574,
+      "learning_rate": 3.6842105263157895e-05,
+      "loss": 4.1507,
+      "step": 500
+    },
+    {
+      "epoch": 10.526315789473685,
+      "grad_norm": 6.05218505859375,
+      "learning_rate": 2.368421052631579e-05,
+      "loss": 3.5677,
+      "step": 1000
+    },
+    {
+      "epoch": 15.789473684210526,
+      "grad_norm": 5.807043552398682,
+      "learning_rate": 1.0526315789473684e-05,
+      "loss": 3.213,
+      "step": 1500
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1900,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 1559913431040000.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}

checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c0c58500b75bc35ed88f4ea14b7b0b341100ad5032101fd651f3196728c575
+size 4984

checkpoint-1500/vocab.json ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-500/config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.40.1",
+  "use_cache": true,
+  "vocab_size": 50257
+}

checkpoint-500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.40.1"
+}

checkpoint-500/merges.txt ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c15d118dc1bb101b75c8c5b215919fb2fcdf45b6be36bcf9fc2e5db72544377f
+size 497774208

checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b41b6fe5f43206e7c39ff2489d0d8671782d28357fd343e7fe02605aff82daf
+size 995642298

checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42a8366b5899e212fa05846b436adc1242d8560b997a0d87623e65e8a0087834
+size 14244

checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec6a9ff6854901acd18edb8e3b85bd42be67496643ba0427ba18baa7092b724a
+size 1064

checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.

checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}

checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,28 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 5.2631578947368425,
+  "eval_steps": 500,
+  "global_step": 500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 5.2631578947368425,
+      "grad_norm": 5.739169120788574,
+      "learning_rate": 3.6842105263157895e-05,
+      "loss": 4.1507,
+      "step": 500
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1900,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 519971143680000.0,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}

checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c0c58500b75bc35ed88f4ea14b7b0b341100ad5032101fd651f3196728c575
+size 4984

checkpoint-500/vocab.json ADDED
The diff for this file is too large to render. See raw diff.
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:70d28985f6238706062b5d46f6d4b311f71723da7106434c508e375a3e3ea52f
 size 497774208
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
   "epoch": 20.0,
-  "total_flos":
-  "train_loss":
-  "train_runtime":
-  "train_samples":
-  "train_samples_per_second":
-  "train_steps_per_second": 2.
+  "total_flos": 1975367761920000.0,
+  "train_loss": 3.517263697574013,
+  "train_runtime": 756.053,
+  "train_samples": 189,
+  "train_samples_per_second": 5.0,
+  "train_steps_per_second": 2.513
 }
trainer_state.json CHANGED
@@ -3,27 +3,48 @@
   "best_model_checkpoint": null,
   "epoch": 20.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 1900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
+    {
+      "epoch": 5.2631578947368425,
+      "grad_norm": 5.739169120788574,
+      "learning_rate": 3.6842105263157895e-05,
+      "loss": 4.1507,
+      "step": 500
+    },
+    {
+      "epoch": 10.526315789473685,
+      "grad_norm": 6.05218505859375,
+      "learning_rate": 2.368421052631579e-05,
+      "loss": 3.5677,
+      "step": 1000
+    },
+    {
+      "epoch": 15.789473684210526,
+      "grad_norm": 5.807043552398682,
+      "learning_rate": 1.0526315789473684e-05,
+      "loss": 3.213,
+      "step": 1500
+    },
     {
       "epoch": 20.0,
-      "step":
-      "total_flos":
-      "train_loss":
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 2.
+      "step": 1900,
+      "total_flos": 1975367761920000.0,
+      "train_loss": 3.517263697574013,
+      "train_runtime": 756.053,
+      "train_samples_per_second": 5.0,
+      "train_steps_per_second": 2.513
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 1900,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 20,
   "save_steps": 500,
-  "total_flos":
+  "total_flos": 1975367761920000.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
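
The 1,900-step cap and the fractional epochs logged above follow from the dataset and batch size; a small sketch of that arithmetic, assuming single-device training with no gradient accumulation (not stated explicitly in this commit):

    import math

    train_samples = 189    # from train_results.json
    train_batch_size = 2   # from trainer_state.json
    num_train_epochs = 20

    steps_per_epoch = math.ceil(train_samples / train_batch_size)  # 95
    print(steps_per_epoch * num_train_epochs)  # 1900 -> "max_steps"
    print(1000 / steps_per_epoch)              # 10.526... -> epoch logged at step 1000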
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:51c0c58500b75bc35ed88f4ea14b7b0b341100ad5032101fd651f3196728c575
 size 4984