gchhablani committed on
Commit
fa3e4e4
1 Parent(s): c848b60

End of training

Browse files
Files changed (47) hide show
  1. all_results.json +14 -0
  2. checkpoint-318/config.json +33 -0
  3. checkpoint-318/optimizer.pt +3 -0
  4. checkpoint-318/pytorch_model.bin +3 -0
  5. checkpoint-318/rng_state.pth +3 -0
  6. checkpoint-318/scheduler.pt +3 -0
  7. checkpoint-318/special_tokens_map.json +1 -0
  8. checkpoint-318/tokenizer.json +0 -0
  9. checkpoint-318/tokenizer_config.json +1 -0
  10. checkpoint-318/trainer_state.json +46 -0
  11. checkpoint-318/training_args.bin +3 -0
  12. checkpoint-477/config.json +33 -0
  13. checkpoint-477/optimizer.pt +3 -0
  14. checkpoint-477/pytorch_model.bin +3 -0
  15. checkpoint-477/rng_state.pth +3 -0
  16. checkpoint-477/scheduler.pt +3 -0
  17. checkpoint-477/special_tokens_map.json +1 -0
  18. checkpoint-477/tokenizer.json +0 -0
  19. checkpoint-477/tokenizer_config.json +1 -0
  20. checkpoint-477/trainer_state.json +61 -0
  21. checkpoint-477/training_args.bin +3 -0
  22. checkpoint-636/config.json +33 -0
  23. checkpoint-636/optimizer.pt +3 -0
  24. checkpoint-636/pytorch_model.bin +3 -0
  25. checkpoint-636/rng_state.pth +3 -0
  26. checkpoint-636/scheduler.pt +3 -0
  27. checkpoint-636/special_tokens_map.json +1 -0
  28. checkpoint-636/tokenizer.json +0 -0
  29. checkpoint-636/tokenizer_config.json +1 -0
  30. checkpoint-636/trainer_state.json +76 -0
  31. checkpoint-636/training_args.bin +3 -0
  32. checkpoint-795/config.json +33 -0
  33. checkpoint-795/optimizer.pt +3 -0
  34. checkpoint-795/pytorch_model.bin +3 -0
  35. checkpoint-795/rng_state.pth +3 -0
  36. checkpoint-795/scheduler.pt +3 -0
  37. checkpoint-795/special_tokens_map.json +1 -0
  38. checkpoint-795/tokenizer.json +0 -0
  39. checkpoint-795/tokenizer_config.json +1 -0
  40. checkpoint-795/trainer_state.json +91 -0
  41. checkpoint-795/training_args.bin +3 -0
  42. eval_results.json +9 -0
  43. pytorch_model.bin +1 -1
  44. runs/Sep23_05-28-20_patrick-general-gpu/events.out.tfevents.1632374926.patrick-general-gpu.961400.0 +2 -2
  45. runs/Sep23_05-28-20_patrick-general-gpu/events.out.tfevents.1632375225.patrick-general-gpu.961400.2 +3 -0
  46. train_results.json +8 -0
  47. trainer_state.json +100 -0
all_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_accuracy": 0.38028169014084506,
4
+ "eval_loss": 0.695301353931427,
5
+ "eval_runtime": 1.5768,
6
+ "eval_samples": 71,
7
+ "eval_samples_per_second": 45.028,
8
+ "eval_steps_per_second": 5.708,
9
+ "train_loss": 0.7078151103085691,
10
+ "train_runtime": 287.2264,
11
+ "train_samples": 635,
12
+ "train_samples_per_second": 11.054,
13
+ "train_steps_per_second": 2.768
14
+ }
checkpoint-318/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/fnet-large",
3
+ "architectures": [
4
+ "FNetForSequenceClassification"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "finetuning_task": "wnli",
9
+ "hidden_act": "gelu_new",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "not_entailment",
14
+ "1": "entailment"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 4096,
18
+ "label2id": {
19
+ "entailment": 1,
20
+ "not_entailment": 0
21
+ },
22
+ "layer_norm_eps": 1e-12,
23
+ "max_position_embeddings": 512,
24
+ "model_type": "fnet",
25
+ "num_hidden_layers": 24,
26
+ "pad_token_id": 3,
27
+ "torch_dtype": "float32",
28
+ "tpu_short_seq_length": 512,
29
+ "transformers_version": "4.11.0.dev0",
30
+ "type_vocab_size": 4,
31
+ "use_tpu_fourier_optimizations": false,
32
+ "vocab_size": 32000
33
+ }
checkpoint-318/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7bce12d4f5a0b85e4456cfd0d3e03d28a7e9814e5dbdb459132d42eaffaedc10
3
+ size 1895696981
checkpoint-318/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3e62aa9d9c88c168128d34fcc6c5db3c44d71380c912d0e990db86b2f31092f
3
+ size 947877033
checkpoint-318/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78b737db473aca43f17a52b2a13eae3e7191bfa111df27e851e320597e4e34bf
3
+ size 14503
checkpoint-318/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b47bc6c3e1669bb645665447b79471b72ba684d2636f6cf50d505cc7c5ed64aa
3
+ size 623
checkpoint-318/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
checkpoint-318/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
checkpoint-318/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"do_lower_case": false, "remove_space": true, "keep_accents": true, "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/gunjan/.cache/huggingface/transformers/9a50cd9b3771023230b1128ab5e112461bf36f5826d5f7eb654348e956979a54.a2cfb41d43ad2ac50fa89998bfa88393398e3bb3439ee99e1b00933bce2eb1ba", "name_or_path": "google/fnet-large", "tokenizer_class": "FNetTokenizer"}
checkpoint-318/trainer_state.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "global_step": 318,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 1.6000000000000003e-05,
13
+ "loss": 0.7217,
14
+ "step": 159
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.5633802816901409,
19
+ "eval_loss": 0.6864463686943054,
20
+ "eval_runtime": 1.553,
21
+ "eval_samples_per_second": 45.719,
22
+ "eval_steps_per_second": 5.795,
23
+ "step": 159
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 1.2e-05,
28
+ "loss": 0.7056,
29
+ "step": 318
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.5633802816901409,
34
+ "eval_loss": 0.6868877410888672,
35
+ "eval_runtime": 1.5642,
36
+ "eval_samples_per_second": 45.391,
37
+ "eval_steps_per_second": 5.754,
38
+ "step": 318
39
+ }
40
+ ],
41
+ "max_steps": 795,
42
+ "num_train_epochs": 5,
43
+ "total_flos": 794532446146560.0,
44
+ "trial_name": null,
45
+ "trial_params": null
46
+ }
checkpoint-318/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b644c1bae6b6798f49b77dc60581484ee19ebf5291a9ad931519f191fc6b6b11
3
+ size 2799
checkpoint-477/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/fnet-large",
3
+ "architectures": [
4
+ "FNetForSequenceClassification"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "finetuning_task": "wnli",
9
+ "hidden_act": "gelu_new",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "not_entailment",
14
+ "1": "entailment"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 4096,
18
+ "label2id": {
19
+ "entailment": 1,
20
+ "not_entailment": 0
21
+ },
22
+ "layer_norm_eps": 1e-12,
23
+ "max_position_embeddings": 512,
24
+ "model_type": "fnet",
25
+ "num_hidden_layers": 24,
26
+ "pad_token_id": 3,
27
+ "torch_dtype": "float32",
28
+ "tpu_short_seq_length": 512,
29
+ "transformers_version": "4.11.0.dev0",
30
+ "type_vocab_size": 4,
31
+ "use_tpu_fourier_optimizations": false,
32
+ "vocab_size": 32000
33
+ }
checkpoint-477/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:231e2a45efab7ef9068039b8a5d92b0c91fb2e6e68f781c677d5eb08166c3728
3
+ size 1895696981
checkpoint-477/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b3065f148846ed9ebca87ee442ab29bddd5022e13468e66b668dbe3a12d14cf
3
+ size 947877033
checkpoint-477/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e50c461d1a2392c9474f8f0cfcf9b5d995bc8a5f28cce746554b7b119bb8580e
3
+ size 14503
checkpoint-477/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56d0e3f5707f41b852d867c5f117f56ed4afd7707ac9ccfd0816ff567ae3e149
3
+ size 623
checkpoint-477/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
checkpoint-477/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
checkpoint-477/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"do_lower_case": false, "remove_space": true, "keep_accents": true, "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/gunjan/.cache/huggingface/transformers/9a50cd9b3771023230b1128ab5e112461bf36f5826d5f7eb654348e956979a54.a2cfb41d43ad2ac50fa89998bfa88393398e3bb3439ee99e1b00933bce2eb1ba", "name_or_path": "google/fnet-large", "tokenizer_class": "FNetTokenizer"}
checkpoint-477/trainer_state.json ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "global_step": 477,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 1.6000000000000003e-05,
13
+ "loss": 0.7217,
14
+ "step": 159
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.5633802816901409,
19
+ "eval_loss": 0.6864463686943054,
20
+ "eval_runtime": 1.553,
21
+ "eval_samples_per_second": 45.719,
22
+ "eval_steps_per_second": 5.795,
23
+ "step": 159
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 1.2e-05,
28
+ "loss": 0.7056,
29
+ "step": 318
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.5633802816901409,
34
+ "eval_loss": 0.6868877410888672,
35
+ "eval_runtime": 1.5642,
36
+ "eval_samples_per_second": 45.391,
37
+ "eval_steps_per_second": 5.754,
38
+ "step": 318
39
+ },
40
+ {
41
+ "epoch": 3.0,
42
+ "learning_rate": 8.000000000000001e-06,
43
+ "loss": 0.706,
44
+ "step": 477
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.5633802816901409,
49
+ "eval_loss": 0.6874909400939941,
50
+ "eval_runtime": 1.5577,
51
+ "eval_samples_per_second": 45.58,
52
+ "eval_steps_per_second": 5.778,
53
+ "step": 477
54
+ }
55
+ ],
56
+ "max_steps": 795,
57
+ "num_train_epochs": 5,
58
+ "total_flos": 1191798669219840.0,
59
+ "trial_name": null,
60
+ "trial_params": null
61
+ }
checkpoint-477/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b644c1bae6b6798f49b77dc60581484ee19ebf5291a9ad931519f191fc6b6b11
3
+ size 2799
checkpoint-636/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/fnet-large",
3
+ "architectures": [
4
+ "FNetForSequenceClassification"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "finetuning_task": "wnli",
9
+ "hidden_act": "gelu_new",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "not_entailment",
14
+ "1": "entailment"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 4096,
18
+ "label2id": {
19
+ "entailment": 1,
20
+ "not_entailment": 0
21
+ },
22
+ "layer_norm_eps": 1e-12,
23
+ "max_position_embeddings": 512,
24
+ "model_type": "fnet",
25
+ "num_hidden_layers": 24,
26
+ "pad_token_id": 3,
27
+ "torch_dtype": "float32",
28
+ "tpu_short_seq_length": 512,
29
+ "transformers_version": "4.11.0.dev0",
30
+ "type_vocab_size": 4,
31
+ "use_tpu_fourier_optimizations": false,
32
+ "vocab_size": 32000
33
+ }
checkpoint-636/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99f0b186a48b2f5cec217b2422e2bad3eb9382bfa3cd4b0789b226e784119452
3
+ size 1895696981
checkpoint-636/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54de71ad34934db6ed956fd8428cfae42131d74bad7bce6cc35fbe74a4e84742
3
+ size 947877033
checkpoint-636/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab51573bdb05c03106e4fc54e41a75dcd33cf42f46a5af90f8797dfe8b6f0248
3
+ size 14503
checkpoint-636/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad181780f1b400cc04b3b1b11db86cebbe1239bfdcaff64bc3f1ea579270f1d2
3
+ size 623
checkpoint-636/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
checkpoint-636/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
checkpoint-636/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"do_lower_case": false, "remove_space": true, "keep_accents": true, "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/gunjan/.cache/huggingface/transformers/9a50cd9b3771023230b1128ab5e112461bf36f5826d5f7eb654348e956979a54.a2cfb41d43ad2ac50fa89998bfa88393398e3bb3439ee99e1b00933bce2eb1ba", "name_or_path": "google/fnet-large", "tokenizer_class": "FNetTokenizer"}
checkpoint-636/trainer_state.json ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 4.0,
5
+ "global_step": 636,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 1.6000000000000003e-05,
13
+ "loss": 0.7217,
14
+ "step": 159
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.5633802816901409,
19
+ "eval_loss": 0.6864463686943054,
20
+ "eval_runtime": 1.553,
21
+ "eval_samples_per_second": 45.719,
22
+ "eval_steps_per_second": 5.795,
23
+ "step": 159
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 1.2e-05,
28
+ "loss": 0.7056,
29
+ "step": 318
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.5633802816901409,
34
+ "eval_loss": 0.6868877410888672,
35
+ "eval_runtime": 1.5642,
36
+ "eval_samples_per_second": 45.391,
37
+ "eval_steps_per_second": 5.754,
38
+ "step": 318
39
+ },
40
+ {
41
+ "epoch": 3.0,
42
+ "learning_rate": 8.000000000000001e-06,
43
+ "loss": 0.706,
44
+ "step": 477
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.5633802816901409,
49
+ "eval_loss": 0.6874909400939941,
50
+ "eval_runtime": 1.5577,
51
+ "eval_samples_per_second": 45.58,
52
+ "eval_steps_per_second": 5.778,
53
+ "step": 477
54
+ },
55
+ {
56
+ "epoch": 4.0,
57
+ "learning_rate": 4.000000000000001e-06,
58
+ "loss": 0.7032,
59
+ "step": 636
60
+ },
61
+ {
62
+ "epoch": 4.0,
63
+ "eval_accuracy": 0.5633802816901409,
64
+ "eval_loss": 0.693115770816803,
65
+ "eval_runtime": 1.5577,
66
+ "eval_samples_per_second": 45.58,
67
+ "eval_steps_per_second": 5.778,
68
+ "step": 636
69
+ }
70
+ ],
71
+ "max_steps": 795,
72
+ "num_train_epochs": 5,
73
+ "total_flos": 1589064892293120.0,
74
+ "trial_name": null,
75
+ "trial_params": null
76
+ }
checkpoint-636/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b644c1bae6b6798f49b77dc60581484ee19ebf5291a9ad931519f191fc6b6b11
3
+ size 2799
checkpoint-795/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/fnet-large",
3
+ "architectures": [
4
+ "FNetForSequenceClassification"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "finetuning_task": "wnli",
9
+ "hidden_act": "gelu_new",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "not_entailment",
14
+ "1": "entailment"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 4096,
18
+ "label2id": {
19
+ "entailment": 1,
20
+ "not_entailment": 0
21
+ },
22
+ "layer_norm_eps": 1e-12,
23
+ "max_position_embeddings": 512,
24
+ "model_type": "fnet",
25
+ "num_hidden_layers": 24,
26
+ "pad_token_id": 3,
27
+ "torch_dtype": "float32",
28
+ "tpu_short_seq_length": 512,
29
+ "transformers_version": "4.11.0.dev0",
30
+ "type_vocab_size": 4,
31
+ "use_tpu_fourier_optimizations": false,
32
+ "vocab_size": 32000
33
+ }
checkpoint-795/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7317731ce1bf4da19b09bc8f82fa132fc1e41895962bf379f1874e292fb6887b
3
+ size 1895696981
checkpoint-795/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:394144643c97b33b5b4ade657a741e8cd7f1aa0da201ed7803847233fecf4e02
3
+ size 947877033
checkpoint-795/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fff540823935b4771a0db2065ea0b8c16448384274f74b8b467c2d9fb883ac66
3
+ size 14503
checkpoint-795/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf1961577efca24a442d0f62693362a2afd9b47c3c55043080beedcf431c79aa
3
+ size 623
checkpoint-795/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
checkpoint-795/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
checkpoint-795/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"do_lower_case": false, "remove_space": true, "keep_accents": true, "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": "/home/gunjan/.cache/huggingface/transformers/9a50cd9b3771023230b1128ab5e112461bf36f5826d5f7eb654348e956979a54.a2cfb41d43ad2ac50fa89998bfa88393398e3bb3439ee99e1b00933bce2eb1ba", "name_or_path": "google/fnet-large", "tokenizer_class": "FNetTokenizer"}
checkpoint-795/trainer_state.json ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.0,
5
+ "global_step": 795,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 1.6000000000000003e-05,
13
+ "loss": 0.7217,
14
+ "step": 159
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.5633802816901409,
19
+ "eval_loss": 0.6864463686943054,
20
+ "eval_runtime": 1.553,
21
+ "eval_samples_per_second": 45.719,
22
+ "eval_steps_per_second": 5.795,
23
+ "step": 159
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 1.2e-05,
28
+ "loss": 0.7056,
29
+ "step": 318
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.5633802816901409,
34
+ "eval_loss": 0.6868877410888672,
35
+ "eval_runtime": 1.5642,
36
+ "eval_samples_per_second": 45.391,
37
+ "eval_steps_per_second": 5.754,
38
+ "step": 318
39
+ },
40
+ {
41
+ "epoch": 3.0,
42
+ "learning_rate": 8.000000000000001e-06,
43
+ "loss": 0.706,
44
+ "step": 477
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.5633802816901409,
49
+ "eval_loss": 0.6874909400939941,
50
+ "eval_runtime": 1.5577,
51
+ "eval_samples_per_second": 45.58,
52
+ "eval_steps_per_second": 5.778,
53
+ "step": 477
54
+ },
55
+ {
56
+ "epoch": 4.0,
57
+ "learning_rate": 4.000000000000001e-06,
58
+ "loss": 0.7032,
59
+ "step": 636
60
+ },
61
+ {
62
+ "epoch": 4.0,
63
+ "eval_accuracy": 0.5633802816901409,
64
+ "eval_loss": 0.693115770816803,
65
+ "eval_runtime": 1.5577,
66
+ "eval_samples_per_second": 45.58,
67
+ "eval_steps_per_second": 5.778,
68
+ "step": 636
69
+ },
70
+ {
71
+ "epoch": 5.0,
72
+ "learning_rate": 0.0,
73
+ "loss": 0.7025,
74
+ "step": 795
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_accuracy": 0.38028169014084506,
79
+ "eval_loss": 0.695301353931427,
80
+ "eval_runtime": 1.5507,
81
+ "eval_samples_per_second": 45.786,
82
+ "eval_steps_per_second": 5.804,
83
+ "step": 795
84
+ }
85
+ ],
86
+ "max_steps": 795,
87
+ "num_train_epochs": 5,
88
+ "total_flos": 1986331115366400.0,
89
+ "trial_name": null,
90
+ "trial_params": null
91
+ }
checkpoint-795/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b644c1bae6b6798f49b77dc60581484ee19ebf5291a9ad931519f191fc6b6b11
3
+ size 2799
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "eval_accuracy": 0.38028169014084506,
4
+ "eval_loss": 0.695301353931427,
5
+ "eval_runtime": 1.5768,
6
+ "eval_samples": 71,
7
+ "eval_samples_per_second": 45.028,
8
+ "eval_steps_per_second": 5.708
9
+ }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e6dba7066dc290f14ef100330a2da6f3b7cffc7e3195d2b1ba23cb5de36c8830
3
  size 947877033
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:394144643c97b33b5b4ade657a741e8cd7f1aa0da201ed7803847233fecf4e02
3
  size 947877033
runs/Sep23_05-28-20_patrick-general-gpu/events.out.tfevents.1632374926.patrick-general-gpu.961400.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5dfcab53911009bcd50d61628966e7ae437d1559ee8bfdb8e946acf2f82613d6
3
- size 3668
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc6ed1821b9ab36dea428e424add43e97aa342fa15b5035981fb6b3e3bf6ba52
3
+ size 5942
runs/Sep23_05-28-20_patrick-general-gpu/events.out.tfevents.1632375225.patrick-general-gpu.961400.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e8a3cdf7faaae201c01127dce79b203f091cff7cc37f60c8ca2de769ca0c1f8
3
+ size 363
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 5.0,
3
+ "train_loss": 0.7078151103085691,
4
+ "train_runtime": 287.2264,
5
+ "train_samples": 635,
6
+ "train_samples_per_second": 11.054,
7
+ "train_steps_per_second": 2.768
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 5.0,
5
+ "global_step": 795,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "learning_rate": 1.6000000000000003e-05,
13
+ "loss": 0.7217,
14
+ "step": 159
15
+ },
16
+ {
17
+ "epoch": 1.0,
18
+ "eval_accuracy": 0.5633802816901409,
19
+ "eval_loss": 0.6864463686943054,
20
+ "eval_runtime": 1.553,
21
+ "eval_samples_per_second": 45.719,
22
+ "eval_steps_per_second": 5.795,
23
+ "step": 159
24
+ },
25
+ {
26
+ "epoch": 2.0,
27
+ "learning_rate": 1.2e-05,
28
+ "loss": 0.7056,
29
+ "step": 318
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "eval_accuracy": 0.5633802816901409,
34
+ "eval_loss": 0.6868877410888672,
35
+ "eval_runtime": 1.5642,
36
+ "eval_samples_per_second": 45.391,
37
+ "eval_steps_per_second": 5.754,
38
+ "step": 318
39
+ },
40
+ {
41
+ "epoch": 3.0,
42
+ "learning_rate": 8.000000000000001e-06,
43
+ "loss": 0.706,
44
+ "step": 477
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.5633802816901409,
49
+ "eval_loss": 0.6874909400939941,
50
+ "eval_runtime": 1.5577,
51
+ "eval_samples_per_second": 45.58,
52
+ "eval_steps_per_second": 5.778,
53
+ "step": 477
54
+ },
55
+ {
56
+ "epoch": 4.0,
57
+ "learning_rate": 4.000000000000001e-06,
58
+ "loss": 0.7032,
59
+ "step": 636
60
+ },
61
+ {
62
+ "epoch": 4.0,
63
+ "eval_accuracy": 0.5633802816901409,
64
+ "eval_loss": 0.693115770816803,
65
+ "eval_runtime": 1.5577,
66
+ "eval_samples_per_second": 45.58,
67
+ "eval_steps_per_second": 5.778,
68
+ "step": 636
69
+ },
70
+ {
71
+ "epoch": 5.0,
72
+ "learning_rate": 0.0,
73
+ "loss": 0.7025,
74
+ "step": 795
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_accuracy": 0.38028169014084506,
79
+ "eval_loss": 0.695301353931427,
80
+ "eval_runtime": 1.5507,
81
+ "eval_samples_per_second": 45.786,
82
+ "eval_steps_per_second": 5.804,
83
+ "step": 795
84
+ },
85
+ {
86
+ "epoch": 5.0,
87
+ "step": 795,
88
+ "total_flos": 1986331115366400.0,
89
+ "train_loss": 0.7078151103085691,
90
+ "train_runtime": 287.2264,
91
+ "train_samples_per_second": 11.054,
92
+ "train_steps_per_second": 2.768
93
+ }
94
+ ],
95
+ "max_steps": 795,
96
+ "num_train_epochs": 5,
97
+ "total_flos": 1986331115366400.0,
98
+ "trial_name": null,
99
+ "trial_params": null
100
+ }