sello-ralethe committed
Commit
452db9d
1 Parent(s): 0ad6b25

Initial commit

config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "_name_or_path": "roberta-base",
+ "architectures": [
+ "RobertaForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.5.1",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 50265
+ }
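
This is a stock roberta-base configuration (12 layers, 12 heads, hidden size 768, vocab 50265) exported by transformers 4.5.1. As a minimal sketch, it can be loaded and inspected with the transformers library; the "./checkpoint" path is illustrative and assumes a local clone of this repo:

# Sketch: load and inspect the config above (path is illustrative).
from transformers import RobertaConfig

config = RobertaConfig.from_pretrained("./checkpoint")
print(config.model_type)         # roberta
print(config.num_hidden_layers)  # 12
print(config.hidden_size)        # 768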
merges.txt ADDED
The diff for this file is too large to render. See raw diff
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9fc389677b7d0fbe8b3e746a286f51b25bbb36676eb836ea84609cc316c692d
+ size 997706211
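
optimizer.pt (like the other .pt/.bin files below) is stored as a Git LFS pointer: the repo tracks only a SHA-256 oid and a byte size, while the ~998 MB blob itself lives in LFS storage. A minimal sketch of verifying a pulled blob against its pointer; the file path is illustrative, and the real bytes only appear after git lfs pull:

# Sketch: verify a Git LFS object against the oid/size from its pointer file.
import hashlib, os

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected_oid = "d9fc389677b7d0fbe8b3e746a286f51b25bbb36676eb836ea84609cc316c692d"
assert os.path.getsize("optimizer.pt") == 997706211
assert sha256_of("optimizer.pt") == expected_oid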
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a96d9e3828cfae9b5392cd854c720dc1b0785400006ecb6f84dbdeee8d58a93c
+ size 498880850
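
At 498880850 bytes, pytorch_model.bin is consistent with a ~125M-parameter roberta-base in fp32 (125M parameters x 4 bytes ≈ 499 MB). A minimal loading sketch, again assuming a local clone with LFS objects pulled:

# Sketch: load the weights and sanity-check the parameter count.
from transformers import RobertaForMaskedLM

model = RobertaForMaskedLM.from_pretrained("./checkpoint")
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.0f}M parameters")  # ~125M, i.e. ~499 MB in fp32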
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17ce912848630acf66111cfa8430822d9f8ebf66d307bcf6ff3294b92001119c
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "roberta-base"}
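
The two tokenizer files describe a standard RoBERTa byte-level BPE setup: <s> and </s> double as CLS/SEP, <pad> has id 1, and <mask> is declared with lstrip: true so it absorbs the space before it. A minimal usage sketch with the transformers fill-mask pipeline (local path is illustrative):

# Sketch: run the checkpoint as a fill-mask model with its own tokenizer.
from transformers import pipeline

fill = pipeline("fill-mask", model="./checkpoint", tokenizer="./checkpoint")
for pred in fill("The goal of life is <mask>.")[:3]:
    print(pred["token_str"], round(pred["score"], 3))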
trainer_state.json ADDED
@@ -0,0 +1,270 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9372888823615804,
+ "global_step": 20000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9510451852939737e-05,
+ "loss": 2.5338,
+ "step": 500
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9020903705879475e-05,
+ "loss": 2.4335,
+ "step": 1000
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.853135555881921e-05,
+ "loss": 2.3284,
+ "step": 1500
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.8041807411758948e-05,
+ "loss": 2.3165,
+ "step": 2000
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.7552259264698686e-05,
+ "loss": 2.2819,
+ "step": 2500
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.706271111763842e-05,
+ "loss": 2.2629,
+ "step": 3000
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.657316297057816e-05,
+ "loss": 2.2099,
+ "step": 3500
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6083614823517894e-05,
+ "loss": 2.2439,
+ "step": 4000
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5594066676457633e-05,
+ "loss": 2.1858,
+ "step": 4500
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.5104518529397367e-05,
+ "loss": 2.1387,
+ "step": 5000
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.4614970382337104e-05,
+ "loss": 2.1852,
+ "step": 5500
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.412542223527684e-05,
+ "loss": 2.1869,
+ "step": 6000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.3635874088216576e-05,
+ "loss": 2.0771,
+ "step": 6500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 2.0530622005462646,
+ "eval_runtime": 23.6575,
+ "eval_samples_per_second": 255.817,
+ "step": 6809
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.3146325941156315e-05,
+ "loss": 2.1258,
+ "step": 7000
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 1.2656777794096052e-05,
+ "loss": 2.0593,
+ "step": 7500
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 1.2167229647035787e-05,
+ "loss": 2.0118,
+ "step": 8000
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 1.1677681499975524e-05,
+ "loss": 1.9987,
+ "step": 8500
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 1.118813335291526e-05,
+ "loss": 2.0134,
+ "step": 9000
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 1.0698585205854997e-05,
+ "loss": 1.9785,
+ "step": 9500
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 1.0209037058794733e-05,
+ "loss": 1.928,
+ "step": 10000
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 9.71948891173447e-06,
+ "loss": 1.9685,
+ "step": 10500
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 9.229940764674206e-06,
+ "loss": 1.8942,
+ "step": 11000
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 8.740392617613943e-06,
+ "loss": 1.9443,
+ "step": 11500
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 8.25084447055368e-06,
+ "loss": 1.8947,
+ "step": 12000
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 7.761296323493416e-06,
+ "loss": 1.9557,
+ "step": 12500
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 7.271748176433154e-06,
+ "loss": 1.9083,
+ "step": 13000
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 6.782200029372889e-06,
+ "loss": 1.966,
+ "step": 13500
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 1.8375927209854126,
+ "eval_runtime": 23.5457,
+ "eval_samples_per_second": 257.032,
+ "step": 13618
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 6.292651882312626e-06,
+ "loss": 1.943,
+ "step": 14000
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 5.8031037352523625e-06,
+ "loss": 1.7706,
+ "step": 14500
+ },
+ {
+ "epoch": 2.2,
+ "learning_rate": 5.313555588192098e-06,
+ "loss": 1.8079,
+ "step": 15000
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 4.824007441131836e-06,
+ "loss": 1.9021,
+ "step": 15500
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 4.334459294071572e-06,
+ "loss": 1.8314,
+ "step": 16000
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 3.844911147011309e-06,
+ "loss": 1.7882,
+ "step": 16500
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.3553629999510454e-06,
+ "loss": 1.8472,
+ "step": 17000
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.865814852890782e-06,
+ "loss": 1.8157,
+ "step": 17500
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 2.3762667058305186e-06,
+ "loss": 1.8546,
+ "step": 18000
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 1.8867185587702552e-06,
+ "loss": 1.8019,
+ "step": 18500
+ },
+ {
+ "epoch": 2.79,
+ "learning_rate": 1.3971704117099918e-06,
+ "loss": 1.7841,
+ "step": 19000
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 9.076222646497284e-07,
+ "loss": 1.7894,
+ "step": 19500
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 4.1807411758946493e-07,
+ "loss": 1.8128,
+ "step": 20000
+ }
+ ],
+ "max_steps": 20427,
+ "num_train_epochs": 3,
+ "total_flos": 2165411575408158.0,
+ "trial_name": null,
+ "trial_params": null
+ }
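
The logged rates are consistent with a linear decay from 2e-05 to 0 over max_steps = 20427 with no warmup (inferred from the values; the schedule itself is not stored in this file). For example, at step 500: 2e-05 x (1 - 500/20427) ≈ 1.95105e-05, the first entry above. A small check:

# Sketch: reproduce the logged learning rates under an assumed
# linear-decay schedule (lr0 = 2e-05, max_steps = 20427, no warmup).
LR0, MAX_STEPS = 2e-05, 20427

def lr_at(step):
    return LR0 * (1 - step / MAX_STEPS)

print(lr_at(500))    # ≈ 1.95105e-05, matches the step-500 entry
print(lr_at(20000))  # ≈ 4.18074e-07, matches the step-20000 entry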
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7d2e6087a9e1ec57857fe40f288f744868c4c9e28d28f51cd7c6205755b4384
+ size 2351
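
training_args.bin is the pickled TrainingArguments object that the transformers Trainer writes next to its checkpoints. A minimal inspection sketch; transformers must be importable since the pickle references its classes, and recent torch versions need weights_only=False to unpickle arbitrary objects:

# Sketch: inspect the saved TrainingArguments.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.max_steps)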
vocab.json ADDED
The diff for this file is too large to render. See raw diff