ChrisZeng committed on
Commit 0480777
1 parent: 5ff66fc

Training in progress, epoch 2

checkpoint-326/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "embedding_size": 1024,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "multi_label_classification",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
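For reference, a minimal sketch of loading this checkpoint locally with the transformers library. The local path is taken from `best_model_checkpoint` in trainer_state.json below and is an assumption about where the files were downloaded; the example sentence pair is illustrative only.

```python
# Minimal sketch, assuming transformers/torch are installed and the checkpoint
# directory has been fetched locally (e.g. via `git lfs pull`). The path below
# mirrors trainer_state.json and is an assumption, not part of this commit.
import torch
from transformers import ElectraForSequenceClassification, ElectraTokenizer

ckpt = "outputs/electra-nli-efl-tweeteval/checkpoint-326"
tokenizer = ElectraTokenizer.from_pretrained(ckpt)
model = ElectraForSequenceClassification.from_pretrained(ckpt)
model.eval()

# config.json sets problem_type to "multi_label_classification", so each of the
# entailment/neutral/contradiction logits gets an independent sigmoid rather
# than a softmax over the three labels.
inputs = tokenizer("A man is sleeping.", "A person is awake.", return_tensors="pt")
with torch.no_grad():
    probs = torch.sigmoid(model(**inputs).logits)[0]
for i, p in enumerate(probs):
    print(model.config.id2label[i], round(p.item(), 3))
```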
checkpoint-326/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b81e4851c2881dcef711af4697ef50a6462ba5c7e1c5b5e3b8378d35e8f383f
+ size 2681485310
checkpoint-326/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86cdd59d0a81eadb0634ce6bcc916feadce861da8cc4cc3451ee185ad4eaa4ac
+ size 1340743917
checkpoint-326/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39a0ca327a2af702d5875e110a84b841cab66fcf5223f03d6137027933eb3adb
+ size 14503
checkpoint-326/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8ce0704b9494d69b9eb0069603a0ed18de95a226f489894d03248f831bc5906
+ size 559
checkpoint-326/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6aae1bb5b445401f65c6949bbf3e6ceb826aa702ac8501d27afc0137be597724
+ size 623
checkpoint-326/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-326/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-326/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "normalization": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-large-discriminator", "tokenizer_class": "ElectraTokenizer"}
checkpoint-326/trainer_state.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "best_metric": 0.3409559428691864,
+   "best_model_checkpoint": "outputs/electra-nli-efl-tweeteval/checkpoint-326",
+   "epoch": 1.997323135755258,
+   "global_step": 326,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 9.499999999999999e-07,
+       "loss": 0.4384,
+       "step": 163
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7444061962134251,
+       "eval_f1": 0.7308261375858633,
+       "eval_loss": 0.39615127444267273,
+       "eval_runtime": 9.0599,
+       "eval_samples_per_second": 256.514,
+       "eval_steps_per_second": 32.119,
+       "step": 163
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 9e-07,
+       "loss": 0.3447,
+       "step": 326
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.76592082616179,
+       "eval_f1": 0.7552159046464709,
+       "eval_loss": 0.3409559428691864,
+       "eval_runtime": 9.4378,
+       "eval_samples_per_second": 246.244,
+       "eval_steps_per_second": 30.833,
+       "step": 326
+     }
+   ],
+   "max_steps": 3260,
+   "num_train_epochs": 20,
+   "total_flos": 6097705758760512.0,
+   "trial_name": null,
+   "trial_params": null
+ }
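trainer_state.json records the optimizer/scheduler position (global_step 326 of max_steps 3260, 20 planned epochs), which is what allows a later run to pick up where this commit left off. A hedged sketch, assuming `trainer` is the Trainer object built by the original training script (not part of this commit):

```python
# Sketch only: `trainer` stands for the transformers.Trainer configured by the
# original training script (model, TrainingArguments, datasets), which this
# commit does not include.
trainer.train(resume_from_checkpoint="outputs/electra-nli-efl-tweeteval/checkpoint-326")
```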
checkpoint-326/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82126e010e751131ea0bffbc21275298a6bdbc244157fe27eed5dd4b7b0d6e0f
+ size 3119
checkpoint-326/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-489/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "embedding_size": 1024,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "multi_label_classification",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-489/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d82fcecb740add996bf5ee0fb32b520102097d0504c5103e7a5e8dd20cb1fa7
+ size 2681485310
checkpoint-489/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8cc6e01615d9fbc03a612a6299c16196268fe2e23e228d881f2f3bfa6b3ee9c
+ size 1340743917
checkpoint-489/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:014f12f4d9dba30ad7ff0de7b420eb7692dfda6f0502a3815d98e2995bb395f0
+ size 14503
checkpoint-489/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b33ea70d27f310dbe6580da93bd08b7140380c500e01a16282e5d91a0fbfcafe
+ size 559
checkpoint-489/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08df44e0bd4d8b64139acfaf9c03eb9e41cbfb7bc1e64291bfc485d827865a29
+ size 623
checkpoint-489/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-489/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-489/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "normalization": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-large-discriminator", "tokenizer_class": "ElectraTokenizer"}
checkpoint-489/trainer_state.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "best_metric": 0.32338443398475647,
+   "best_model_checkpoint": "outputs/electra-nli-efl-tweeteval/checkpoint-489",
+   "epoch": 2.9973231357552583,
+   "global_step": 489,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 9.499999999999999e-07,
+       "loss": 0.4384,
+       "step": 163
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7444061962134251,
+       "eval_f1": 0.7308261375858633,
+       "eval_loss": 0.39615127444267273,
+       "eval_runtime": 9.0599,
+       "eval_samples_per_second": 256.514,
+       "eval_steps_per_second": 32.119,
+       "step": 163
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 9e-07,
+       "loss": 0.3447,
+       "step": 326
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.76592082616179,
+       "eval_f1": 0.7552159046464709,
+       "eval_loss": 0.3409559428691864,
+       "eval_runtime": 9.4378,
+       "eval_samples_per_second": 246.244,
+       "eval_steps_per_second": 30.833,
+       "step": 326
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.499999999999999e-07,
+       "loss": 0.3057,
+       "step": 489
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7749569707401033,
+       "eval_f1": 0.768808341108185,
+       "eval_loss": 0.32338443398475647,
+       "eval_runtime": 9.0418,
+       "eval_samples_per_second": 257.028,
+       "eval_steps_per_second": 32.184,
+       "step": 489
+     }
+   ],
+   "max_steps": 3260,
+   "num_train_epochs": 20,
+   "total_flos": 9143325990301824.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-489/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82126e010e751131ea0bffbc21275298a6bdbc244157fe27eed5dd4b7b0d6e0f
+ size 3119
checkpoint-489/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:14b4ec5568a4ea5a2a6e8bb5651b0a178b89c7f36e4cb0ad65616ef313550349
+ oid sha256:a8cc6e01615d9fbc03a612a6299c16196268fe2e23e228d881f2f3bfa6b3ee9c
  size 1340743917
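The new oid matches checkpoint-489/pytorch_model.bin above, so the top-level weights now track the latest (best) checkpoint; only the LFS pointer changes in the diff, not the ~1.34 GB payload. A hedged sketch of fetching the actual weights behind this pointer with huggingface_hub; `<repo_id>` is a placeholder for this model repository and must be filled in:

```python
# Sketch only: resolves the LFS pointer to the real pytorch_model.bin in the
# local cache. "<repo_id>" is a placeholder, not stated in this commit.
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="<repo_id>", filename="pytorch_model.bin")
print(path)  # local path to the downloaded weights (size 1340743917 bytes)
```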