Training in progress, epoch 1
- logs/events.out.tfevents.1709746175.d65e8524b0f6.5185.14 +2 -2
- logs/events.out.tfevents.1709746719.d65e8524b0f6.5185.15 +3 -0
- logs/events.out.tfevents.1709746826.d65e8524b0f6.5185.16 +3 -0
- model.safetensors +1 -1
- run-14/checkpoint-196/config.json +34 -0
- run-14/checkpoint-196/model.safetensors +3 -0
- run-14/checkpoint-196/optimizer.pt +3 -0
- run-14/checkpoint-196/rng_state.pth +3 -0
- run-14/checkpoint-196/scheduler.pt +3 -0
- run-14/checkpoint-196/special_tokens_map.json +7 -0
- run-14/checkpoint-196/tokenizer.json +0 -0
- run-14/checkpoint-196/tokenizer_config.json +57 -0
- run-14/checkpoint-196/trainer_state.json +103 -0
- run-14/checkpoint-196/training_args.bin +3 -0
- run-14/checkpoint-196/vocab.txt +0 -0
- run-14/checkpoint-245/config.json +34 -0
- run-14/checkpoint-245/model.safetensors +3 -0
- run-14/checkpoint-245/optimizer.pt +3 -0
- run-14/checkpoint-245/rng_state.pth +3 -0
- run-14/checkpoint-245/scheduler.pt +3 -0
- run-14/checkpoint-245/special_tokens_map.json +7 -0
- run-14/checkpoint-245/tokenizer.json +0 -0
- run-14/checkpoint-245/tokenizer_config.json +57 -0
- run-14/checkpoint-245/trainer_state.json +122 -0
- run-14/checkpoint-245/training_args.bin +3 -0
- run-14/checkpoint-245/vocab.txt +0 -0
- run-16/checkpoint-55/config.json +34 -0
- run-16/checkpoint-55/model.safetensors +3 -0
- run-16/checkpoint-55/optimizer.pt +3 -0
- run-16/checkpoint-55/rng_state.pth +3 -0
- run-16/checkpoint-55/scheduler.pt +3 -0
- run-16/checkpoint-55/special_tokens_map.json +7 -0
- run-16/checkpoint-55/tokenizer.json +0 -0
- run-16/checkpoint-55/tokenizer_config.json +57 -0
- run-16/checkpoint-55/trainer_state.json +46 -0
- run-16/checkpoint-55/training_args.bin +3 -0
- run-16/checkpoint-55/vocab.txt +0 -0
- tokenizer.json +1 -1
- training_args.bin +1 -1
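
For anyone pulling this commit, here is a minimal sketch of how one of the newly added hyperparameter-search checkpoints could be loaded for a quick check (the local checkpoint path and example sentence are illustrative assumptions, not part of this commit; the repo must be cloned with git-lfs so the model.safetensors weights are materialized):

from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hypothetical local path to one of the checkpoints added in this commit.
checkpoint_dir = "run-14/checkpoint-245"

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint_dir)

# The checkpoint's config.json maps label id 0 to "negative" and 1 to "positive".
inputs = tokenizer("a surprisingly charming little film", return_tensors="pt")
predicted_id = model(**inputs).logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])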
logs/events.out.tfevents.1709746175.d65e8524b0f6.5185.14
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8d435472f55d161fe7bb026e89d9690d7fe4cab1b16adb090dfbe328c1a0bc15
+size 8388
logs/events.out.tfevents.1709746719.d65e8524b0f6.5185.15
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4346fec82fe37cc151fb8d7fc72058bbdc5ffd10f9574882c4368b3f846799a4
+size 5315
logs/events.out.tfevents.1709746826.d65e8524b0f6.5185.16
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c31fb9097de6b5e6623426f94dc8d1c3a8477f0c85915b68c4d9c3acbbcc3df
+size 5315
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:840f48c35da7a97bf8423c4d56b59abfff31d02d3333ba76897059f3fe2011af
 size 17549312
run-14/checkpoint-196/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 128,
+  "id2label": {
+    "0": "negative",
+    "1": "positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 512,
+  "label2id": {
+    "negative": "0",
+    "positive": "1"
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 2,
+  "num_hidden_layers": 2,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
run-14/checkpoint-196/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2295b7e48286c79db173661971c047924eade017265de90326321524f679f31d
+size 17549312
run-14/checkpoint-196/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a3cee671de739e7b50e880e304f8e32cb2a7ce327104d50edeec5cf77a548e4
+size 35122746
run-14/checkpoint-196/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9569c132b78af182a75b126495eb19599bcc7d757d8aa23b142d6678c6b23bf
+size 14054
run-14/checkpoint-196/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dad2624a96739394d21738b94d277594eaec288b5723af012115c96cc2899f2
+size 1064
run-14/checkpoint-196/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
run-14/checkpoint-196/tokenizer.json
ADDED
The diff for this file is too large to render.
run-14/checkpoint-196/tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-14/checkpoint-196/trainer_state.json
ADDED
@@ -0,0 +1,103 @@
+{
+  "best_metric": 0.8228962818003914,
+  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-14/checkpoint-196",
+  "epoch": 4.0,
+  "eval_steps": 500,
+  "global_step": 196,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 1.3073023557662964,
+      "learning_rate": 0.00021800841920007835,
+      "loss": 0.5666,
+      "step": 49
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7720156555772995,
+      "eval_f1": 0.8010247651579846,
+      "eval_loss": 0.48246321082115173,
+      "eval_precision": 0.7106060606060606,
+      "eval_recall": 0.9178082191780822,
+      "eval_runtime": 28.2754,
+      "eval_samples_per_second": 36.145,
+      "eval_steps_per_second": 1.132,
+      "step": 49
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 3.786695957183838,
+      "learning_rate": 0.00016350631440005875,
+      "loss": 0.4698,
+      "step": 98
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8023483365949119,
+      "eval_f1": 0.8212389380530973,
+      "eval_loss": 0.436308890581131,
+      "eval_precision": 0.7495961227786753,
+      "eval_recall": 0.9080234833659491,
+      "eval_runtime": 28.7331,
+      "eval_samples_per_second": 35.569,
+      "eval_steps_per_second": 1.114,
+      "step": 98
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 2.9201297760009766,
+      "learning_rate": 0.00010900420960003918,
+      "loss": 0.4418,
+      "step": 147
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.8111545988258317,
+      "eval_f1": 0.8197945845004668,
+      "eval_loss": 0.41809919476509094,
+      "eval_precision": 0.7839285714285714,
+      "eval_recall": 0.8590998043052838,
+      "eval_runtime": 27.7463,
+      "eval_samples_per_second": 36.834,
+      "eval_steps_per_second": 1.153,
+      "step": 147
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 1.8454147577285767,
+      "learning_rate": 5.450210480001959e-05,
+      "loss": 0.4241,
+      "step": 196
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.8228962818003914,
+      "eval_f1": 0.8340971585701191,
+      "eval_loss": 0.41074350476264954,
+      "eval_precision": 0.7844827586206896,
+      "eval_recall": 0.8904109589041096,
+      "eval_runtime": 27.5953,
+      "eval_samples_per_second": 37.035,
+      "eval_steps_per_second": 1.16,
+      "step": 196
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 245,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 942780789120.0,
+  "train_batch_size": 63,
+  "trial_name": null,
+  "trial_params": {
+    "alpha": 0.8444228810359851,
+    "learning_rate": 0.00027251052400009793,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 63,
+    "temperature": 21
+  }
+}
run-14/checkpoint-196/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a93046ce295ef56d7e5753c2ad419c6bb89f07af26dc70719f2b8f995444c3d
+size 4920
run-14/checkpoint-196/vocab.txt
ADDED
The diff for this file is too large to render.
run-14/checkpoint-245/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 128,
+  "id2label": {
+    "0": "negative",
+    "1": "positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 512,
+  "label2id": {
+    "negative": "0",
+    "positive": "1"
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 2,
+  "num_hidden_layers": 2,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
run-14/checkpoint-245/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd40f2db7e1cb181ebdc9edae6c62e7a8ad5a6acf52a9f5676872ac293882353
+size 17549312
run-14/checkpoint-245/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74c990e2e48a0964cc6902a241e1b76969b468bab49a8cdd0191edcb595ea1cf
+size 35122746
run-14/checkpoint-245/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c71858aa009ee75396c24a85135d9a9552bdda497e94f0a0ebf3e44e140711e9
+size 14054
run-14/checkpoint-245/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3e1b5c2b971bf5bf7e19bf20021cabc7cd11f07fcae17ab9a6ba7e2a364c95d
+size 1064
run-14/checkpoint-245/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
run-14/checkpoint-245/tokenizer.json
ADDED
The diff for this file is too large to render.
run-14/checkpoint-245/tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-14/checkpoint-245/trainer_state.json
ADDED
@@ -0,0 +1,122 @@
+{
+  "best_metric": 0.8287671232876712,
+  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-14/checkpoint-245",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 245,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 1.3073023557662964,
+      "learning_rate": 0.00021800841920007835,
+      "loss": 0.5666,
+      "step": 49
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7720156555772995,
+      "eval_f1": 0.8010247651579846,
+      "eval_loss": 0.48246321082115173,
+      "eval_precision": 0.7106060606060606,
+      "eval_recall": 0.9178082191780822,
+      "eval_runtime": 28.2754,
+      "eval_samples_per_second": 36.145,
+      "eval_steps_per_second": 1.132,
+      "step": 49
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 3.786695957183838,
+      "learning_rate": 0.00016350631440005875,
+      "loss": 0.4698,
+      "step": 98
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8023483365949119,
+      "eval_f1": 0.8212389380530973,
+      "eval_loss": 0.436308890581131,
+      "eval_precision": 0.7495961227786753,
+      "eval_recall": 0.9080234833659491,
+      "eval_runtime": 28.7331,
+      "eval_samples_per_second": 35.569,
+      "eval_steps_per_second": 1.114,
+      "step": 98
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 2.9201297760009766,
+      "learning_rate": 0.00010900420960003918,
+      "loss": 0.4418,
+      "step": 147
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.8111545988258317,
+      "eval_f1": 0.8197945845004668,
+      "eval_loss": 0.41809919476509094,
+      "eval_precision": 0.7839285714285714,
+      "eval_recall": 0.8590998043052838,
+      "eval_runtime": 27.7463,
+      "eval_samples_per_second": 36.834,
+      "eval_steps_per_second": 1.153,
+      "step": 147
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 1.8454147577285767,
+      "learning_rate": 5.450210480001959e-05,
+      "loss": 0.4241,
+      "step": 196
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.8228962818003914,
+      "eval_f1": 0.8340971585701191,
+      "eval_loss": 0.41074350476264954,
+      "eval_precision": 0.7844827586206896,
+      "eval_recall": 0.8904109589041096,
+      "eval_runtime": 27.5953,
+      "eval_samples_per_second": 37.035,
+      "eval_steps_per_second": 1.16,
+      "step": 196
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 1.8352748155593872,
+      "learning_rate": 0.0,
+      "loss": 0.4122,
+      "step": 245
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.8287671232876712,
+      "eval_f1": 0.839596700274977,
+      "eval_loss": 0.4070187509059906,
+      "eval_precision": 0.7896551724137931,
+      "eval_recall": 0.8962818003913894,
+      "eval_runtime": 28.0192,
+      "eval_samples_per_second": 36.475,
+      "eval_steps_per_second": 1.142,
+      "step": 245
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 245,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 1178475986400.0,
+  "train_batch_size": 63,
+  "trial_name": null,
+  "trial_params": {
+    "alpha": 0.8444228810359851,
+    "learning_rate": 0.00027251052400009793,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 63,
+    "temperature": 21
+  }
+}
run-14/checkpoint-245/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a93046ce295ef56d7e5753c2ad419c6bb89f07af26dc70719f2b8f995444c3d
+size 4920
run-14/checkpoint-245/vocab.txt
ADDED
The diff for this file is too large to render.
run-16/checkpoint-55/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "google/bert_uncased_L-2_H-128_A-2",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 128,
+  "id2label": {
+    "0": "negative",
+    "1": "positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 512,
+  "label2id": {
+    "negative": "0",
+    "positive": "1"
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 2,
+  "num_hidden_layers": 2,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
run-16/checkpoint-55/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:840f48c35da7a97bf8423c4d56b59abfff31d02d3333ba76897059f3fe2011af
+size 17549312
run-16/checkpoint-55/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f8fecbfd4eb601d2b2ebaf5ef4cebfe7b026cb621336b10df98c6d2afcdd919
+size 35122746
run-16/checkpoint-55/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b588f6b58c8f950c9442cf99e4b25f82e10f4a04910349fe09a3ddec4d480990
+size 14054
run-16/checkpoint-55/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94c8ef987310ab0df3dc5e384122633d92955c5ca55290486fcda2f6d0a2e51c
+size 1064
run-16/checkpoint-55/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
run-16/checkpoint-55/tokenizer.json
ADDED
The diff for this file is too large to render.
run-16/checkpoint-55/tokenizer_config.json
ADDED
@@ -0,0 +1,57 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-16/checkpoint-55/trainer_state.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "best_metric": 0.7181996086105675,
+  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-16/checkpoint-55",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 55,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 1.115179181098938,
+      "learning_rate": 0.0001711445020920055,
+      "loss": 0.4773,
+      "step": 55
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7181996086105675,
+      "eval_f1": 0.7125748502994013,
+      "eval_loss": 0.4258935749530792,
+      "eval_precision": 0.7270875763747454,
+      "eval_recall": 0.6986301369863014,
+      "eval_runtime": 27.8796,
+      "eval_samples_per_second": 36.658,
+      "eval_steps_per_second": 1.148,
+      "step": 55
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 275,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 235695197280.0,
+  "train_batch_size": 56,
+  "trial_name": null,
+  "trial_params": {
+    "alpha": 0.6575195229183988,
+    "learning_rate": 0.00021393062761500687,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 56,
+    "temperature": 22
+  }
+}
run-16/checkpoint-55/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:853b9bd91d8e7500b2e68ab85992695ef180b9f0e29125c26ec58713c7d0700a
+size 4920
run-16/checkpoint-55/vocab.txt
ADDED
The diff for this file is too large to render.
tokenizer.json
CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 31,
     "strategy": "LongestFirst",
     "stride": 0
   },
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:853b9bd91d8e7500b2e68ab85992695ef180b9f0e29125c26ec58713c7d0700a
 size 4920