JulienRPA committed
Commit 2e616c0
1 Parent(s): b0b0a69

Training in progress, step 1000
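This commit is one of the periodic checkpoint pushes the Trainer makes while fine-tuning is still running (global step 1000 of a scheduled 11700, see trainer_state.json below). To work against exactly this state of the repository, `from_pretrained` can be pinned to the commit hash. A minimal sketch, where `<user>/<repo>` is a placeholder for the repository id, which this diff view does not show:

from transformers import AutoTokenizer, EncoderDecoderModel

# Placeholder repository id: replace with the repo this commit belongs to.
repo_id = "<user>/<repo>"
# revision pins the download to commit 2e616c0 instead of the branch head.
tokenizer = AutoTokenizer.from_pretrained(repo_id, revision="2e616c0")
model = EncoderDecoderModel.from_pretrained(repo_id, revision="2e616c0")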

added_tokens.json CHANGED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_commit_hash": "f79f78df17138d38be1b6aa5041a13bf715c3e86",
+  "_commit_hash": "bf0ba286dd1951b2990a0aa57cf72f5caa2c4bfb",
   "_name_or_path": "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0",
   "architectures": [
     "EncoderDecoderModel"
@@ -84,7 +84,7 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 31560
+    "vocab_size": 34522
   },
   "decoder_start_token_id": 101,
   "early_stopping": true,
@@ -167,7 +167,7 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 31560
+    "vocab_size": 34522
   },
   "eos_token_id": 102,
   "is_encoder_decoder": true,
@@ -178,5 +178,5 @@
   "pad_token_id": 0,
   "torch_dtype": "float32",
   "transformers_version": null,
-  "vocab_size": 31560
+  "vocab_size": 34522
 }
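The `vocab_size` change from 31560 to 34522 in both sub-configs and at the top level lines up with the enlarged added_tokens.json above, and it is also why pytorch_model.bin grows further down: new tokens were registered on the tokenizer and the token embeddings were resized to match. A minimal sketch of that step, assuming the standard transformers API rather than the author's exact training script (the example tokens are hypothetical; the real additions are the entries in added_tokens.json):

from transformers import AutoTokenizer, EncoderDecoderModel

base = "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0"
tokenizer = AutoTokenizer.from_pretrained(base)
model = EncoderDecoderModel.from_pretrained(base)

# Hypothetical examples of SPARQL-style vocabulary; the real list lives in added_tokens.json.
tokenizer.add_tokens(["wdt:P31", "wd:Q5"])

# Resize each side's embedding matrix separately to the new tokenizer length;
# this also updates the encoder and decoder vocab_size entries in the config.
model.encoder.resize_token_embeddings(len(tokenizer))
model.decoder.resize_token_embeddings(len(tokenizer))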
last-checkpoint/added_tokens.json CHANGED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_commit_hash": "f79f78df17138d38be1b6aa5041a13bf715c3e86",
+  "_commit_hash": "bf0ba286dd1951b2990a0aa57cf72f5caa2c4bfb",
   "_name_or_path": "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0",
   "architectures": [
     "EncoderDecoderModel"
@@ -84,7 +84,7 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 31560
+    "vocab_size": 34522
   },
   "decoder_start_token_id": 101,
   "early_stopping": true,
@@ -167,7 +167,7 @@
     "typical_p": 1.0,
     "use_bfloat16": false,
     "use_cache": true,
-    "vocab_size": 31560
+    "vocab_size": 34522
   },
   "eos_token_id": 102,
   "is_encoder_decoder": true,
@@ -178,5 +178,5 @@
   "pad_token_id": 0,
   "torch_dtype": "float32",
   "transformers_version": null,
-  "vocab_size": 31560
+  "vocab_size": 34522
 }
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c02848fe198317b9df9c7b015be7a5a8d106b3d8c95581e94d368f12e3bd2aa
-size 1987250795
+oid sha256:8166f9df0c99894923580dd1d8f3e44212e0b0ef6371e20f7fa27b1980e7cdcd
+size 2023671531
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b36224e26915e60a69c3f0ca4cf1c1be64bf420a8b16ec7564af83207802475
-size 996026489
+oid sha256:cc899faae2d95cfa25532820e4c4876cc4a250be659324f96270d3648acc5d5d
+size 1014236857
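The optimizer and weight files are tracked with Git LFS, so the diff only shows the three-line pointer stub (spec version, SHA-256 of the payload, byte size); the weights growing from 996026489 to 1014236857 bytes is consistent with the enlarged embedding matrices noted above. A small sketch for reading such a stub, assuming the file on disk is still the pointer (i.e. before `git lfs pull`):

def parse_lfs_pointer(path):
    # Split each "key value" line of a Git LFS pointer file into a dict.
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("last-checkpoint/pytorch_model.bin")
print(info["oid"], int(info["size"]))  # e.g. sha256:cc899faa... 1014236857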
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:989bf089e99481fed8a0ca8c42df0ebf959c5055d0443690a60cd2739bd951a3
+oid sha256:579b9ad044e049c9d6416c19b90ae0aaf3dfa4d6c410d924a85993ad3cd08d50
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42803a5c9416fe30408add7b6997c032033033ede5cc3b581b72368a5f47393e
+oid sha256:d74cc6285bd526ccecc46e70cc671e33a9af4da21ca6fb0670355de22affc5ab
 size 627
last-checkpoint/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/trainer_state.json CHANGED
@@ -1,340 +1,52 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 230.76923076923077,
-  "global_step": 9000,
+  "epoch": 25.641025641025642,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 12.82,
-      "learning_rate": 1.2375000000000001e-05,
-      "loss": 5.1115,
+      "learning_rate": 1.24e-05,
+      "loss": 4.0279,
       "step": 500
     },
     {
       "epoch": 12.82,
-      "eval_bleu": 55.8418,
+      "eval_bleu": 49.841,
       "eval_em": 0.0,
-      "eval_gen_len": 78.6043,
-      "eval_loss": 1.7833343744277954,
+      "eval_gen_len": 51.6403,
+      "eval_loss": 2.4031472206115723,
       "eval_rm": 0.0,
-      "eval_runtime": 216.1632,
-      "eval_samples_per_second": 1.929,
-      "eval_steps_per_second": 0.245,
+      "eval_runtime": 135.1177,
+      "eval_samples_per_second": 3.086,
+      "eval_steps_per_second": 0.392,
       "step": 500
     },
     {
       "epoch": 25.64,
-      "learning_rate": 2.4875e-05,
-      "loss": 0.8877,
+      "learning_rate": 2.4900000000000002e-05,
+      "loss": 1.3442,
       "step": 1000
     },
     {
       "epoch": 25.64,
-      "eval_bleu": 88.9639,
-      "eval_em": 0.2062,
-      "eval_gen_len": 77.9976,
-      "eval_loss": 0.3478254973888397,
-      "eval_rm": 0.2158,
-      "eval_runtime": 173.1336,
-      "eval_samples_per_second": 2.409,
-      "eval_steps_per_second": 0.306,
+      "eval_bleu": 85.0177,
+      "eval_em": 0.0,
+      "eval_gen_len": 57.9784,
+      "eval_loss": 0.501366138458252,
+      "eval_rm": 0.0,
+      "eval_runtime": 188.7983,
+      "eval_samples_per_second": 2.209,
+      "eval_steps_per_second": 0.281,
       "step": 1000
-    },
-    {
-      "epoch": 38.46,
-      "learning_rate": 3.737500000000001e-05,
-      "loss": 0.1085,
-      "step": 1500
-    },
-    {
-      "epoch": 38.46,
-      "eval_bleu": 95.531,
-      "eval_em": 0.3669,
-      "eval_gen_len": 80.2854,
-      "eval_loss": 0.21576985716819763,
-      "eval_rm": 0.4436,
-      "eval_runtime": 170.6947,
-      "eval_samples_per_second": 2.443,
-      "eval_steps_per_second": 0.31,
-      "step": 1500
-    },
-    {
-      "epoch": 51.28,
-      "learning_rate": 4.9875000000000006e-05,
-      "loss": 0.0437,
-      "step": 2000
-    },
-    {
-      "epoch": 51.28,
-      "eval_bleu": 96.2387,
-      "eval_em": 0.5012,
-      "eval_gen_len": 80.8729,
-      "eval_loss": 0.20689286291599274,
-      "eval_rm": 0.5899,
-      "eval_runtime": 169.6593,
-      "eval_samples_per_second": 2.458,
-      "eval_steps_per_second": 0.312,
-      "step": 2000
-    },
-    {
-      "epoch": 64.1,
-      "learning_rate": 4.7448453608247423e-05,
-      "loss": 0.0276,
-      "step": 2500
-    },
-    {
-      "epoch": 64.1,
-      "eval_bleu": 96.2239,
-      "eval_em": 0.5755,
-      "eval_gen_len": 80.2662,
-      "eval_loss": 0.19657467305660248,
-      "eval_rm": 0.6523,
-      "eval_runtime": 171.715,
-      "eval_samples_per_second": 2.428,
-      "eval_steps_per_second": 0.309,
-      "step": 2500
-    },
-    {
-      "epoch": 76.92,
-      "learning_rate": 4.487113402061856e-05,
-      "loss": 0.0187,
-      "step": 3000
-    },
-    {
-      "epoch": 76.92,
-      "eval_bleu": 96.1781,
-      "eval_em": 0.5564,
-      "eval_gen_len": 81.753,
-      "eval_loss": 0.22087490558624268,
-      "eval_rm": 0.6523,
-      "eval_runtime": 178.818,
-      "eval_samples_per_second": 2.332,
-      "eval_steps_per_second": 0.296,
-      "step": 3000
-    },
-    {
-      "epoch": 89.74,
-      "learning_rate": 4.229381443298969e-05,
-      "loss": 0.0154,
-      "step": 3500
-    },
-    {
-      "epoch": 89.74,
-      "eval_bleu": 96.6038,
-      "eval_em": 0.5707,
-      "eval_gen_len": 81.0576,
-      "eval_loss": 0.23550103604793549,
-      "eval_rm": 0.6739,
-      "eval_runtime": 177.0475,
-      "eval_samples_per_second": 2.355,
-      "eval_steps_per_second": 0.299,
-      "step": 3500
-    },
-    {
-      "epoch": 102.56,
-      "learning_rate": 3.9716494845360825e-05,
-      "loss": 0.0128,
-      "step": 4000
-    },
-    {
-      "epoch": 102.56,
-      "eval_bleu": 96.3205,
-      "eval_em": 0.5827,
-      "eval_gen_len": 81.6307,
-      "eval_loss": 0.25351256132125854,
-      "eval_rm": 0.6906,
-      "eval_runtime": 173.3396,
-      "eval_samples_per_second": 2.406,
-      "eval_steps_per_second": 0.306,
-      "step": 4000
-    },
-    {
-      "epoch": 115.38,
-      "learning_rate": 3.713917525773196e-05,
-      "loss": 0.0096,
-      "step": 4500
-    },
-    {
-      "epoch": 115.38,
-      "eval_bleu": 96.604,
-      "eval_em": 0.6211,
-      "eval_gen_len": 80.7698,
-      "eval_loss": 0.23454025387763977,
-      "eval_rm": 0.7314,
-      "eval_runtime": 177.5772,
-      "eval_samples_per_second": 2.348,
-      "eval_steps_per_second": 0.298,
-      "step": 4500
-    },
-    {
-      "epoch": 128.21,
-      "learning_rate": 3.4561855670103095e-05,
-      "loss": 0.0073,
-      "step": 5000
-    },
-    {
-      "epoch": 128.21,
-      "eval_bleu": 96.5656,
-      "eval_em": 0.6355,
-      "eval_gen_len": 81.6811,
-      "eval_loss": 0.22575555741786957,
-      "eval_rm": 0.7386,
-      "eval_runtime": 171.583,
-      "eval_samples_per_second": 2.43,
-      "eval_steps_per_second": 0.309,
-      "step": 5000
-    },
-    {
-      "epoch": 141.03,
-      "learning_rate": 3.1984536082474226e-05,
-      "loss": 0.0082,
-      "step": 5500
-    },
-    {
-      "epoch": 141.03,
-      "eval_bleu": 96.3627,
-      "eval_em": 0.6019,
-      "eval_gen_len": 80.5396,
-      "eval_loss": 0.24117104709148407,
-      "eval_rm": 0.7122,
-      "eval_runtime": 171.7828,
-      "eval_samples_per_second": 2.427,
-      "eval_steps_per_second": 0.309,
-      "step": 5500
-    },
-    {
-      "epoch": 153.85,
-      "learning_rate": 2.9407216494845364e-05,
-      "loss": 0.0064,
-      "step": 6000
-    },
-    {
-      "epoch": 153.85,
-      "eval_bleu": 96.7404,
-      "eval_em": 0.6331,
-      "eval_gen_len": 81.5659,
-      "eval_loss": 0.23870165646076202,
-      "eval_rm": 0.7338,
-      "eval_runtime": 176.1891,
-      "eval_samples_per_second": 2.367,
-      "eval_steps_per_second": 0.301,
-      "step": 6000
-    },
-    {
-      "epoch": 166.67,
-      "learning_rate": 2.6829896907216496e-05,
-      "loss": 0.0048,
-      "step": 6500
-    },
-    {
-      "epoch": 166.67,
-      "eval_bleu": 96.5993,
-      "eval_em": 0.6211,
-      "eval_gen_len": 81.5779,
-      "eval_loss": 0.2463260143995285,
-      "eval_rm": 0.729,
-      "eval_runtime": 170.4492,
-      "eval_samples_per_second": 2.446,
-      "eval_steps_per_second": 0.311,
-      "step": 6500
-    },
-    {
-      "epoch": 179.49,
-      "learning_rate": 2.425257731958763e-05,
-      "loss": 0.0041,
-      "step": 7000
-    },
-    {
-      "epoch": 179.49,
-      "eval_bleu": 96.447,
-      "eval_em": 0.6307,
-      "eval_gen_len": 80.5324,
-      "eval_loss": 0.2447213977575302,
-      "eval_rm": 0.7386,
-      "eval_runtime": 166.7311,
-      "eval_samples_per_second": 2.501,
-      "eval_steps_per_second": 0.318,
-      "step": 7000
-    },
-    {
-      "epoch": 192.31,
-      "learning_rate": 2.1675257731958766e-05,
-      "loss": 0.0036,
-      "step": 7500
-    },
-    {
-      "epoch": 192.31,
-      "eval_bleu": 96.5899,
-      "eval_em": 0.6163,
-      "eval_gen_len": 80.7794,
-      "eval_loss": 0.2510390877723694,
-      "eval_rm": 0.7314,
-      "eval_runtime": 173.7032,
-      "eval_samples_per_second": 2.401,
-      "eval_steps_per_second": 0.305,
-      "step": 7500
-    },
-    {
-      "epoch": 205.13,
-      "learning_rate": 1.9097938144329897e-05,
-      "loss": 0.0034,
-      "step": 8000
-    },
-    {
-      "epoch": 205.13,
-      "eval_bleu": 96.4759,
-      "eval_em": 0.6331,
-      "eval_gen_len": 81.7578,
-      "eval_loss": 0.2551712095737457,
-      "eval_rm": 0.7434,
-      "eval_runtime": 170.3668,
-      "eval_samples_per_second": 2.448,
-      "eval_steps_per_second": 0.311,
-      "step": 8000
-    },
-    {
-      "epoch": 217.95,
-      "learning_rate": 1.6520618556701032e-05,
-      "loss": 0.0023,
-      "step": 8500
-    },
-    {
-      "epoch": 217.95,
-      "eval_bleu": 96.4892,
-      "eval_em": 0.6379,
-      "eval_gen_len": 81.9424,
-      "eval_loss": 0.25497403740882874,
-      "eval_rm": 0.7482,
-      "eval_runtime": 168.8592,
-      "eval_samples_per_second": 2.47,
-      "eval_steps_per_second": 0.314,
-      "step": 8500
-    },
-    {
-      "epoch": 230.77,
-      "learning_rate": 1.3943298969072165e-05,
-      "loss": 0.0022,
-      "step": 9000
-    },
-    {
-      "epoch": 230.77,
-      "eval_bleu": 96.8559,
-      "eval_em": 0.6475,
-      "eval_gen_len": 81.5635,
-      "eval_loss": 0.2539912760257721,
-      "eval_rm": 0.7506,
-      "eval_runtime": 158.2741,
-      "eval_samples_per_second": 2.635,
-      "eval_steps_per_second": 0.335,
-      "step": 9000
     }
   ],
   "max_steps": 11700,
   "num_train_epochs": 300,
-  "total_flos": 7171986679845120.0,
+  "total_flos": 810312415883520.0,
   "trial_name": null,
   "trial_params": null
 }
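trainer_state.json keeps the same layout in both versions: global counters plus a log_history list with one dictionary per logging or evaluation event; the new file simply restarts at global step 1000 with the fresh run's numbers. A quick way to pull the evaluation metrics back out, assuming a local checkout of this checkpoint directory:

import json

with open("last-checkpoint/trainer_state.json") as fh:
    state = json.load(fh)

print(state["global_step"], "/", state["max_steps"])
# Evaluation records are the log_history entries that carry eval_* keys.
for record in state["log_history"]:
    if "eval_bleu" in record:
        print(record["step"], record["eval_bleu"], record["eval_em"], record["eval_loss"])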
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16b1498745616fbf0a4cd4c1b6cf32468b15b936a8b11b56029767534510acd1
+oid sha256:ffc9c446a7cf1044793b419a9995d30772c70afe831ef8253e81e0a2b524a602
 size 4155
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b36224e26915e60a69c3f0ca4cf1c1be64bf420a8b16ec7564af83207802475
-size 996026489
+oid sha256:cc899faae2d95cfa25532820e4c4876cc4a250be659324f96270d3648acc5d5d
+size 1014236857
last-checkpoint/scaler.pt → runs/Jun01_08-15-55_7b8f3300e052/1685608114.229445/events.out.tfevents.1685608114.7b8f3300e052.20157.1 RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ca42b4728e4a3dd46497569040a4bcb0499b808d030d537723afa704d3d5cea
-size 557
+oid sha256:70231ea6ac78bdfb93119ad37a0b717a97fda6ea52c86a06bbe4576fbebdf834
+size 6249
runs/Jun01_08-15-55_7b8f3300e052/events.out.tfevents.1685608114.7b8f3300e052.20157.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26d5c01100abcc1057fe393c3a7e9a393fcde1b5d1a136fa5563abc64870ed85
+size 9795
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16b1498745616fbf0a4cd4c1b6cf32468b15b936a8b11b56029767534510acd1
+oid sha256:ffc9c446a7cf1044793b419a9995d30772c70afe831ef8253e81e0a2b524a602
 size 4155