gchhablani committed
Commit ee8b74b
Parent: aaa00e1

Training in progress, epoch 5

checkpoint-477/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "bert-large-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "wnli",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "not_entailment",
+     "1": "entailment"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "entailment": 1,
+     "not_entailment": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
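This config records a bert-large-cased encoder (24 layers, 16 heads, hidden size 1024) fine-tuned as a two-way WNLI entailment classifier. A minimal sketch of loading the checkpoint directory with the transformers auto classes; the relative path is an assumption (it presumes the repo has been cloned locally):

from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

config = AutoConfig.from_pretrained("checkpoint-477")
print(config.finetuning_task)   # wnli
print(config.id2label)          # {0: 'not_entailment', 1: 'entailment'}

# The same directory holds pytorch_model.bin and the tokenizer files,
# so model and tokenizer load straight from it.
model = AutoModelForSequenceClassification.from_pretrained("checkpoint-477")
tokenizer = AutoTokenizer.from_pretrained("checkpoint-477")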
checkpoint-477/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d40534f8627b662a6d777bb3acfde9f0572158f7852fd75540989ceedc67d86
+ size 2668879261
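The .pt/.bin entries in this commit are Git LFS pointers, not the payloads themselves: a spec version, the SHA-256 of the real file (oid), and its byte size. At 2,668,879,261 bytes the optimizer state is roughly twice the 1,334,486,957-byte model, consistent with AdamW keeping two float32 moment buffers per parameter. A sketch of checking a fetched payload against its pointer; the local path assumes `git lfs pull` has already materialized the file:

import hashlib

def sha256_of(path, chunk=1 << 20):
    # Stream the file so multi-GB payloads never need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

# oid copied from the pointer above.
expected = "9d40534f8627b662a6d777bb3acfde9f0572158f7852fd75540989ceedc67d86"
assert sha256_of("checkpoint-477/optimizer.pt") == expected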
checkpoint-477/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42eca72cc9c5a314ef8e5559cce9f781148a145e44faf105b07c3b8c66f70555
+ size 1334486957
checkpoint-477/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d21ec070ab5fe78d7472ff002a3f34d25f2b59893947f95d0451bb6728e66e5
+ size 14503
checkpoint-477/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56d0e3f5707f41b852d867c5f117f56ed4afd7707ac9ccfd0816ff567ae3e149
+ size 623
checkpoint-477/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-477/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-477/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-cased", "tokenizer_class": "BertTokenizer"}
checkpoint-477/trainer_state.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "global_step": 477,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.7114,
+       "step": 159
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6923297047615051,
+       "eval_runtime": 2.3107,
+       "eval_samples_per_second": 30.726,
+       "eval_steps_per_second": 3.895,
+       "step": 159
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 1.2e-05,
+       "loss": 0.7141,
+       "step": 318
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6894838809967041,
+       "eval_runtime": 2.3191,
+       "eval_samples_per_second": 30.616,
+       "eval_steps_per_second": 3.881,
+       "step": 318
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 0.7063,
+       "step": 477
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6930345296859741,
+       "eval_runtime": 2.3253,
+       "eval_samples_per_second": 30.534,
+       "eval_steps_per_second": 3.87,
+       "step": 477
+     }
+   ],
+   "max_steps": 795,
+   "num_train_epochs": 5,
+   "total_flos": 1775329247139840.0,
+   "trial_name": null,
+   "trial_params": null
+ }
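trainer_state.json is what lets transformers' Trainer resume mid-run: it records the global step (477 = 3 epochs x 159 steps), the logged LR schedule, and the eval history. A sketch of resuming from here follows; the hyperparameters are inferred, not read from the author's training_args.bin, and are assumptions (linear decay to 0 over 795 steps with an epoch-1 LR of 1.6e-5 implies a peak LR of 2e-5; 159 steps over WNLI's 635 training examples implies batch size 4):

from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("checkpoint-477")
model = AutoModelForSequenceClassification.from_pretrained("checkpoint-477")

# GLUE WNLI, tokenized the standard sentence-pair way.
wnli = load_dataset("glue", "wnli").map(
    lambda ex: tokenizer(ex["sentence1"], ex["sentence2"], truncation=True),
    batched=True,
)

args = TrainingArguments(
    output_dir="out",
    num_train_epochs=5,
    learning_rate=2e-5,              # assumed, inferred above
    per_device_train_batch_size=4,   # assumed: ceil(635 / 4) = 159 steps/epoch
    evaluation_strategy="epoch",
    logging_strategy="epoch",
    save_strategy="epoch",
)

trainer = Trainer(model=model, args=args,
                  train_dataset=wnli["train"],
                  eval_dataset=wnli["validation"],
                  tokenizer=tokenizer)
trainer.train(resume_from_checkpoint="checkpoint-477")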
checkpoint-477/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f437d761207ace3e79f2ae16f9d937b1288fc6469422d252660a340c9050722c
+ size 2799
checkpoint-477/vocab.txt ADDED
The diff for this file is too large to render.
checkpoint-636/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "bert-large-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "wnli",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "not_entailment",
+     "1": "entailment"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "entailment": 1,
+     "not_entailment": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
checkpoint-636/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79128435f78f20f2211914a60d3bb761ee4ffbca13db696674be2b1457a6222d
+ size 2668879261
checkpoint-636/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:879d7449bc5ad24b22820cfee1c5ea64782629e287fd9df0bdf9baeb350784f2
+ size 1334486957
checkpoint-636/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e14051863c09c76b915e88468d6d19a9a1a7807238be3c557278cbd25f19c80f
+ size 14503
checkpoint-636/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad181780f1b400cc04b3b1b11db86cebbe1239bfdcaff64bc3f1ea579270f1d2
+ size 623
checkpoint-636/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-636/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-636/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-cased", "tokenizer_class": "BertTokenizer"}
checkpoint-636/trainer_state.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "global_step": 636,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.7114,
+       "step": 159
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6923297047615051,
+       "eval_runtime": 2.3107,
+       "eval_samples_per_second": 30.726,
+       "eval_steps_per_second": 3.895,
+       "step": 159
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 1.2e-05,
+       "loss": 0.7141,
+       "step": 318
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6894838809967041,
+       "eval_runtime": 2.3191,
+       "eval_samples_per_second": 30.616,
+       "eval_steps_per_second": 3.881,
+       "step": 318
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 0.7063,
+       "step": 477
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6930345296859741,
+       "eval_runtime": 2.3253,
+       "eval_samples_per_second": 30.534,
+       "eval_steps_per_second": 3.87,
+       "step": 477
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 4.000000000000001e-06,
+       "loss": 0.712,
+       "step": 636
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.4507042253521127,
+       "eval_loss": 0.707716703414917,
+       "eval_runtime": 2.3283,
+       "eval_samples_per_second": 30.494,
+       "eval_steps_per_second": 3.865,
+       "step": 636
+     }
+   ],
+   "max_steps": 795,
+   "num_train_epochs": 5,
+   "total_flos": 2367105662853120.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-636/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f437d761207ace3e79f2ae16f9d937b1288fc6469422d252660a340c9050722c
+ size 2799
checkpoint-636/vocab.txt ADDED
The diff for this file is too large to render.
checkpoint-795/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "bert-large-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "wnli",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "not_entailment",
+     "1": "entailment"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "entailment": 1,
+     "not_entailment": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
checkpoint-795/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:637af5a1f78e71f4617acc203c5c39892b12ac2b78d74b7d17e73910f6ab3c9b
+ size 2668879261
checkpoint-795/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c5dde120789d9b264b76f05ebcc26560923c534520cf5f48e6122be5dc3383c
+ size 1334486957
checkpoint-795/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb660861d1531f35fb807a82c4b995bd19d0404348db8d756799541171638950
+ size 14503
checkpoint-795/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf1961577efca24a442d0f62693362a2afd9b47c3c55043080beedcf431c79aa
+ size 623
checkpoint-795/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-795/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-795/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-cased", "tokenizer_class": "BertTokenizer"}
checkpoint-795/trainer_state.json ADDED
@@ -0,0 +1,91 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "global_step": 795,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.7114,
+       "step": 159
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6923297047615051,
+       "eval_runtime": 2.3107,
+       "eval_samples_per_second": 30.726,
+       "eval_steps_per_second": 3.895,
+       "step": 159
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 1.2e-05,
+       "loss": 0.7141,
+       "step": 318
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6894838809967041,
+       "eval_runtime": 2.3191,
+       "eval_samples_per_second": 30.616,
+       "eval_steps_per_second": 3.881,
+       "step": 318
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 0.7063,
+       "step": 477
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.5633802816901409,
+       "eval_loss": 0.6930345296859741,
+       "eval_runtime": 2.3253,
+       "eval_samples_per_second": 30.534,
+       "eval_steps_per_second": 3.87,
+       "step": 477
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 4.000000000000001e-06,
+       "loss": 0.712,
+       "step": 636
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.4507042253521127,
+       "eval_loss": 0.707716703414917,
+       "eval_runtime": 2.3283,
+       "eval_samples_per_second": 30.494,
+       "eval_steps_per_second": 3.865,
+       "step": 636
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.0,
+       "loss": 0.7037,
+       "step": 795
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.352112676056338,
+       "eval_loss": 0.7087209820747375,
+       "eval_runtime": 2.3213,
+       "eval_samples_per_second": 30.586,
+       "eval_steps_per_second": 3.877,
+       "step": 795
+     }
+   ],
+   "max_steps": 795,
+   "num_train_epochs": 5,
+   "total_flos": 2958882078566400.0,
+   "trial_name": null,
+   "trial_params": null
+ }
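Across the five epochs the train loss never moves off ln 2 (0.70-0.71 against a 0.693 chance floor) and eval accuracy decays from 0.563 (the 40/71 majority-class baseline on WNLI's 71-example dev set) to 0.451 and finally 0.352, the familiar BERT-large-on-WNLI instability. Since best_metric is null, no best-model tracking ran, so the curve has to be read out of log_history, e.g.:

import json

# Read the per-epoch eval results recorded in the final checkpoint.
with open("checkpoint-795/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"acc={entry['eval_accuracy']:.3f}, loss={entry['eval_loss']:.4f}")
# epoch 1: acc=0.563 ... epoch 5: acc=0.352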
checkpoint-795/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f437d761207ace3e79f2ae16f9d937b1288fc6469422d252660a340c9050722c
+ size 2799
checkpoint-795/vocab.txt ADDED
The diff for this file is too large to render.
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cbc6c9582b34997820189caafa51f5c54a7698bdd6958aa97bca042bccaf1f5e
+ oid sha256:4c5dde120789d9b264b76f05ebcc26560923c534520cf5f48e6122be5dc3383c
  size 1334486957
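The top-level pointer now carries the same oid (4c5dde12...) as checkpoint-795/pytorch_model.bin, so the published weights are the final epoch-5 model even though epochs 1-3 evaluated better. A sketch of fetching them from the Hub; the repo id is an assumption inferred from the committer and task names, not stated anywhere in this diff:

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="gchhablani/bert-large-cased-finetuned-wnli",  # assumed repo id
    filename="pytorch_model.bin",
)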
runs/Sep23_04-38-21_patrick-general-gpu/events.out.tfevents.1632371921.patrick-general-gpu.956617.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:030006dd39e186cf9d299ed0a218d313e4b0611d2e659a5aaaa053737bd62d9f
- size 4401
+ oid sha256:13ea0296a707ff907ede8a33e0a8facb9c6778135ccc7dd4e9793b9007aeb452
+ size 5841