jfernandez committed
Commit 85bf5fd
1 Parent(s): 5839feb

Upload 12 files

checkpoint-10000/config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 52000
+ }
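
The checkpoint's config describes a compact, 6-layer RoBERTa-style masked language model with a 52,000-token vocabulary. As a rough sketch (not part of this commit), it can be instantiated with transformers as below; the omitted fields fall back to library defaults that match the values shown, and the resulting ~83M float32 parameters line up with the ~334 MB pytorch_model.bin further down.

```python
from transformers import RobertaConfig, RobertaForMaskedLM

# Mirror the architecture fields from checkpoint-10000/config.json above;
# everything omitted falls back to the transformers defaults seen in that file.
config = RobertaConfig(
    vocab_size=52_000,
    max_position_embeddings=514,
    hidden_size=768,
    intermediate_size=3072,
    num_hidden_layers=6,
    num_attention_heads=12,
    type_vocab_size=1,
)

model = RobertaForMaskedLM(config)
print(f"{model.num_parameters():,} parameters")  # roughly 83M -> ~334 MB in float32
```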
checkpoint-10000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d528b6983aea75e56ee61b1ab94fb125f4dadd9f8c62fb5ff8d68baf5d47b212
+ size 668097477
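
The binary entries in this commit (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin, tfevents) are stored as Git LFS pointer files: the three lines shown (version, oid, size) stand in for the actual blob. The ~668 MB optimizer state is roughly twice the ~334 MB of model weights, consistent with AdamW keeping two float32 moment tensors per parameter. One way to materialise the real file is via huggingface_hub; the repo_id below is a placeholder, not something stated in this diff.

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual ~668 MB optimizer state.
# NOTE: "jfernandez/some-roberta-repo" is a hypothetical repo id; substitute
# the repository this commit was actually pushed to.
local_path = hf_hub_download(
    repo_id="jfernandez/some-roberta-repo",
    filename="checkpoint-10000/optimizer.pt",
)
print(local_path)
```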
checkpoint-10000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fece542e3e1d5eee328cf5ab03fe4f57aaba90eeccf613b89be0ad0dcb792ef3
+ size 334058169
checkpoint-10000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f1f964d633a8d747a79e604915cf6a0e0040c812fc0ddf5a173fbcd5b4e9beb
+ size 14575
checkpoint-10000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f3b81df0eebc3ff73583701a929ff0c4e18f1a9be3237af6d1f9dad4091c54b
+ size 627
checkpoint-10000/trainer_state.json ADDED
@@ -0,0 +1,136 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.7599361653621096,
+   "global_step": 10000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "learning_rate": 4.810015958659473e-05,
+       "loss": 7.3864,
+       "step": 500
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 4.6200319173189456e-05,
+       "loss": 6.8171,
+       "step": 1000
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 4.430047875978418e-05,
+       "loss": 6.6465,
+       "step": 1500
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 4.240063834637891e-05,
+       "loss": 6.5187,
+       "step": 2000
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 4.050079793297363e-05,
+       "loss": 6.4542,
+       "step": 2500
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 3.860095751956836e-05,
+       "loss": 6.392,
+       "step": 3000
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 3.6701117106163084e-05,
+       "loss": 6.2944,
+       "step": 3500
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 3.480127669275781e-05,
+       "loss": 6.1424,
+       "step": 4000
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 3.290143627935254e-05,
+       "loss": 6.1603,
+       "step": 4500
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 3.1001595865947265e-05,
+       "loss": 6.0149,
+       "step": 5000
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 2.910175545254199e-05,
+       "loss": 5.9559,
+       "step": 5500
+     },
+     {
+       "epoch": 0.46,
+       "learning_rate": 2.7201915039136716e-05,
+       "loss": 5.8511,
+       "step": 6000
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 2.5302074625731443e-05,
+       "loss": 5.8782,
+       "step": 6500
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 2.3402234212326166e-05,
+       "loss": 5.8042,
+       "step": 7000
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 2.1502393798920893e-05,
+       "loss": 5.7415,
+       "step": 7500
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 1.9602553385515617e-05,
+       "loss": 5.6506,
+       "step": 8000
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 1.7702712972110344e-05,
+       "loss": 5.699,
+       "step": 8500
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 1.580287255870507e-05,
+       "loss": 5.627,
+       "step": 9000
+     },
+     {
+       "epoch": 0.72,
+       "learning_rate": 1.3903032145299796e-05,
+       "loss": 5.615,
+       "step": 9500
+     },
+     {
+       "epoch": 0.76,
+       "learning_rate": 1.2003191731894521e-05,
+       "loss": 5.4553,
+       "step": 10000
+     }
+   ],
+   "max_steps": 13159,
+   "num_train_epochs": 1,
+   "total_flos": 1084338745132032.0,
+   "trial_name": null,
+   "trial_params": null
+ }
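
The log_history above is consistent with the Trainer's default schedule: a linear decay from a base learning rate of 5e-5 to 0 over max_steps = 13159, with no warmup. The base rate itself is inferred from the logged values, not stated in the diff; a quick sketch to check:

```python
# Linear decay, no warmup: lr(step) = base_lr * (1 - step / max_steps)
base_lr, max_steps = 5e-5, 13159  # base_lr is an inference, not in the diff

for step in (500, 5000, 10000):
    print(step, base_lr * (1 - step / max_steps))
# 500   -> ~4.8100e-05  (logged 4.810015958659473e-05)
# 5000  -> ~3.1002e-05  (logged 3.1001595865947265e-05)
# 10000 -> ~1.2003e-05  (logged 1.2003191731894521e-05)
```

The loss falling from 7.39 to 5.46 over the first 10,000 of 13,159 steps also matches the checkpoint being taken at epoch ~0.76 of a single training epoch.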
checkpoint-10000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a9da38f20ddf6ab9aae7fb8129a73b1be6438ba4499e818f468e12abf4faafd
+ size 3387
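
Besides the weights, this checkpoint directory carries everything transformers' Trainer needs to resume mid-run: optimizer.pt and scheduler.pt restore the AdamW and LR-scheduler state, rng_state.pth the random-number generators, and trainer_state.json/training_args.bin the bookkeeping. A minimal sketch, assuming the model, arguments and dataset objects from the original (unshown) training script are rebuilt the same way:

```python
from transformers import Trainer

# model, training_args, train_dataset and data_collator are assumed to be
# recreated exactly as in the original run; none of that code is in this commit.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=data_collator,
)

# Restores optimizer, scheduler and RNG state, then continues from step 10000.
trainer.train(resume_from_checkpoint="checkpoint-10000")
```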
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 52000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df804d392ccd2da7c023807b67106053ac068f20b5b58af56beda8dc3469a2a7
+ size 334058169
runs/Jan07_03-24-07_fec26fa8b457/1673061866.9062183/events.out.tfevents.1673061866.fec26fa8b457.1308.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f00f5ce096cb7a52daf273f093cd6c86bc29442ac21078574aa1a288d5a205b4
+ size 5518
runs/Jan07_03-24-07_fec26fa8b457/events.out.tfevents.1673061866.fec26fa8b457.1308.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d6dbbca552448001d8c831c3e6de067cb11ca134d4abd62506af7ebb59c34a1
+ size 8082
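
The two files under runs/ are TensorBoard event logs from the same run: by convention the one inside the timestamped subdirectory holds the hyperparameter dump and the larger one the scalar curves, though that split is an assumption here. Beyond pointing TensorBoard at runs/, they can be read programmatically, for example with the tensorboard package (tag names such as "train/loss" follow the usual Trainer convention and are not confirmed by this diff):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point at the run directory (adjust the path to a local checkout of the repo).
acc = EventAccumulator("runs/Jan07_03-24-07_fec26fa8b457")
acc.Reload()

print(acc.Tags()["scalars"])             # available scalar tags
for event in acc.Scalars("train/loss"):  # should mirror trainer_state.json's log_history
    print(event.step, event.value)
```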
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a9da38f20ddf6ab9aae7fb8129a73b1be6438ba4499e818f468e12abf4faafd
+ size 3387
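
training_args.bin (uploaded both at the top level and inside the checkpoint, with identical hashes) is the pickled TrainingArguments object; the run's hyperparameters such as batch size and learning rate live there rather than in the diff itself. A small sketch for inspecting it once downloaded, assuming a compatible transformers install is available for unpickling:

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments object, so
# transformers must be importable for torch.load to reconstruct it.
args = torch.load("training_args.bin")
print(args)
```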