marksverdhei committed
Commit df24829
1 Parent(s): f69b7b2
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "_name_or_path": "t5-large",
+ "architectures": [
+ "T5ForConditionalGeneration"
+ ],
+ "d_ff": 4096,
+ "d_kv": 64,
+ "d_model": 1024,
+ "decoder_start_token_id": 0,
+ "dropout_rate": 0.1,
+ "eos_token_id": 1,
+ "feed_forward_proj": "relu",
+ "initializer_factor": 1.0,
+ "is_encoder_decoder": true,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "n_positions": 512,
+ "num_decoder_layers": 24,
+ "num_heads": 16,
+ "num_layers": 24,
+ "output_past": true,
+ "pad_token_id": 0,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 32,
+ "task_specific_params": {
+ "summarization": {
+ "early_stopping": true,
+ "length_penalty": 2.0,
+ "max_length": 200,
+ "min_length": 30,
+ "no_repeat_ngram_size": 3,
+ "num_beams": 4,
+ "prefix": "summarize: "
+ },
+ "translation_en_to_de": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to German: "
+ },
+ "translation_en_to_fr": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to French: "
+ },
+ "translation_en_to_ro": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to Romanian: "
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.18.0",
+ "use_cache": true,
+ "vocab_size": 32128
+ }
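
For reference, a minimal sketch (not part of this commit) of loading the checkpoint described by this config with the transformers library and reusing one of its task_specific_params entries at generation time. The local directory path is a placeholder, and the task prefixes here are inherited from the stock t5-large config, so they may not match whatever this checkpoint was fine-tuned on.

```python
# Minimal sketch, assuming the files from this commit sit in a local directory.
# "path/to/checkpoint" is a placeholder; the summarization settings below come
# from the inherited t5-large task_specific_params shown above.
from transformers import T5ForConditionalGeneration, T5Tokenizer

checkpoint = "path/to/checkpoint"  # placeholder for a local copy of these files
tokenizer = T5Tokenizer.from_pretrained(checkpoint)
model = T5ForConditionalGeneration.from_pretrained(checkpoint)

params = model.config.task_specific_params["summarization"]
text = params["prefix"] + "Studies have shown that owning a dog is good for you."

inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
output_ids = model.generate(
    **inputs,
    max_length=params["max_length"],
    min_length=params["min_length"],
    num_beams=params["num_beams"],
    length_penalty=params["length_penalty"],
    no_repeat_ngram_size=params["no_repeat_ngram_size"],
    early_stopping=params["early_stopping"],
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```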
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebd2117ae4e13632591fb12df0a776c72477e93473292a6ed69227ecbd85b1ad
+ size 5352924
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33a4cff6e449e5cd4f3271f6e03731317b08ec80f237d97a04757dc7082e8024
+ size 2950844807
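
optimizer.pt and pytorch_model.bin are stored as Git LFS pointers, so the listings above contain only the pointer metadata (oid and size), not the roughly 2.9 GB of weights. A hedged sketch of resolving the weights with huggingface_hub follows; the repo_id is a hypothetical placeholder, since the repository name is not shown in this commit view.

```python
# Sketch: download the actual pytorch_model.bin behind the LFS pointer above.
# The repo_id is a hypothetical placeholder; substitute the real Hub repository.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="marksverdhei/some-t5-large-finetune",  # placeholder repo id
    filename="pytorch_model.bin",
    # revision="df24829",  # optionally pin to this commit
)
print(local_path)
```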
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1efa0f1eefbed09f46946dafb48b8c4fdecc8ed780acf7d66316668493405cb5
+ size 14503
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a689715f971a4c8d1a10a754858d2aa4354a3d29075ef989fc3d0bd671436ac
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "sp_model_kwargs": {}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "t5-large", "tokenizer_class": "T5Tokenizer"}
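
The tokenizer files added here (spiece.model, special_tokens_map.json, tokenizer_config.json) are the stock t5-large SentencePiece tokenizer with the 100 `<extra_id_*>` sentinel tokens registered above. A minimal sketch of loading it and inspecting those special tokens; the directory path is again a placeholder.

```python
# Sketch: load the tokenizer files from this commit and inspect the sentinels.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("path/to/checkpoint")  # placeholder path

print(tokenizer.model_max_length)                # 512, per tokenizer_config.json
print(len(tokenizer.additional_special_tokens))  # 100 <extra_id_*> tokens
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))
```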
trainer_state.json ADDED
@@ -0,0 +1,440 @@
+ {
+ "best_metric": 1.1455323696136475,
+ "best_model_checkpoint": "./checkpoints/t5-large/checkpoint-13314",
+ "epoch": 14.0,
+ "global_step": 13314,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.27,
+ "learning_rate": 2.5e-06,
+ "loss": 12.6774,
+ "step": 256
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 5e-06,
+ "loss": 5.0181,
+ "step": 512
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.930840717527556e-06,
+ "loss": 1.8667,
+ "step": 768
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 1.3393584489822388,
+ "eval_runtime": 7.0532,
+ "eval_samples_per_second": 70.89,
+ "eval_steps_per_second": 8.932,
+ "step": 951
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 4.8616814350551114e-06,
+ "loss": 1.5018,
+ "step": 1024
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 4.792522152582667e-06,
+ "loss": 1.343,
+ "step": 1280
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 4.723362870110223e-06,
+ "loss": 1.334,
+ "step": 1536
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 4.654203587637779e-06,
+ "loss": 1.2965,
+ "step": 1792
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 1.22269606590271,
+ "eval_runtime": 7.1665,
+ "eval_samples_per_second": 69.77,
+ "eval_steps_per_second": 8.791,
+ "step": 1902
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 4.585044305165334e-06,
+ "loss": 1.2232,
+ "step": 2048
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.51588502269289e-06,
+ "loss": 1.2218,
+ "step": 2304
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 4.446725740220445e-06,
+ "loss": 1.2472,
+ "step": 2560
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 4.377566457748001e-06,
+ "loss": 1.2539,
+ "step": 2816
+ },
+ {
+ "epoch": 3.0,
+ "eval_loss": 1.1890126466751099,
+ "eval_runtime": 7.0492,
+ "eval_samples_per_second": 70.93,
+ "eval_steps_per_second": 8.937,
+ "step": 2853
+ },
+ {
+ "epoch": 3.23,
+ "learning_rate": 4.308407175275557e-06,
+ "loss": 1.215,
+ "step": 3072
+ },
+ {
+ "epoch": 3.5,
+ "learning_rate": 4.239247892803113e-06,
+ "loss": 1.1736,
+ "step": 3328
+ },
+ {
+ "epoch": 3.77,
+ "learning_rate": 4.170088610330668e-06,
+ "loss": 1.1738,
+ "step": 3584
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 1.1715768575668335,
+ "eval_runtime": 6.9966,
+ "eval_samples_per_second": 71.463,
+ "eval_steps_per_second": 9.004,
+ "step": 3804
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 4.100929327858224e-06,
+ "loss": 1.2254,
+ "step": 3840
+ },
+ {
+ "epoch": 4.31,
+ "learning_rate": 4.031770045385779e-06,
+ "loss": 1.2022,
+ "step": 4096
+ },
+ {
+ "epoch": 4.58,
+ "learning_rate": 3.962610762913335e-06,
+ "loss": 1.1542,
+ "step": 4352
+ },
+ {
+ "epoch": 4.85,
+ "learning_rate": 3.893451480440891e-06,
+ "loss": 1.1795,
+ "step": 4608
+ },
+ {
+ "epoch": 5.0,
+ "eval_loss": 1.162368655204773,
+ "eval_runtime": 6.9995,
+ "eval_samples_per_second": 71.433,
+ "eval_steps_per_second": 9.001,
+ "step": 4755
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 3.824292197968447e-06,
+ "loss": 1.1573,
+ "step": 4864
+ },
+ {
+ "epoch": 5.38,
+ "learning_rate": 3.755132915496002e-06,
+ "loss": 1.1332,
+ "step": 5120
+ },
+ {
+ "epoch": 5.65,
+ "learning_rate": 3.685973633023558e-06,
+ "loss": 1.1795,
+ "step": 5376
+ },
+ {
+ "epoch": 5.92,
+ "learning_rate": 3.616814350551113e-06,
+ "loss": 1.1308,
+ "step": 5632
+ },
+ {
+ "epoch": 6.0,
+ "eval_loss": 1.1566176414489746,
+ "eval_runtime": 7.0064,
+ "eval_samples_per_second": 71.364,
+ "eval_steps_per_second": 8.992,
+ "step": 5706
+ },
+ {
+ "epoch": 6.19,
+ "learning_rate": 3.547655068078669e-06,
+ "loss": 1.1676,
+ "step": 5888
+ },
+ {
+ "epoch": 6.46,
+ "learning_rate": 3.4784957856062245e-06,
+ "loss": 1.1117,
+ "step": 6144
+ },
+ {
+ "epoch": 6.73,
+ "learning_rate": 3.4093365031337805e-06,
+ "loss": 1.1289,
+ "step": 6400
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 3.340177220661336e-06,
+ "loss": 1.1195,
+ "step": 6656
+ },
+ {
+ "epoch": 7.0,
+ "eval_loss": 1.1531755924224854,
+ "eval_runtime": 6.9771,
+ "eval_samples_per_second": 71.663,
+ "eval_steps_per_second": 9.03,
+ "step": 6657
+ },
+ {
+ "epoch": 7.27,
+ "learning_rate": 3.271017938188892e-06,
+ "loss": 1.1091,
+ "step": 6912
+ },
+ {
+ "epoch": 7.54,
+ "learning_rate": 3.201858655716447e-06,
+ "loss": 1.1003,
+ "step": 7168
+ },
+ {
+ "epoch": 7.81,
+ "learning_rate": 3.132699373244003e-06,
+ "loss": 1.1075,
+ "step": 7424
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 1.1493767499923706,
+ "eval_runtime": 6.9872,
+ "eval_samples_per_second": 71.56,
+ "eval_steps_per_second": 9.017,
+ "step": 7608
+ },
+ {
+ "epoch": 8.08,
+ "learning_rate": 3.0635400907715584e-06,
+ "loss": 1.1445,
+ "step": 7680
+ },
+ {
+ "epoch": 8.34,
+ "learning_rate": 2.9943808082991144e-06,
+ "loss": 1.0677,
+ "step": 7936
+ },
+ {
+ "epoch": 8.61,
+ "learning_rate": 2.92522152582667e-06,
+ "loss": 1.1393,
+ "step": 8192
+ },
+ {
+ "epoch": 8.88,
+ "learning_rate": 2.856062243354226e-06,
+ "loss": 1.0989,
+ "step": 8448
+ },
+ {
+ "epoch": 9.0,
+ "eval_loss": 1.1490625143051147,
+ "eval_runtime": 7.0593,
+ "eval_samples_per_second": 70.828,
+ "eval_steps_per_second": 8.924,
+ "step": 8559
+ },
+ {
+ "epoch": 9.15,
+ "learning_rate": 2.786902960881781e-06,
+ "loss": 1.0875,
+ "step": 8704
+ },
+ {
+ "epoch": 9.42,
+ "learning_rate": 2.717743678409337e-06,
+ "loss": 1.0832,
+ "step": 8960
+ },
+ {
+ "epoch": 9.69,
+ "learning_rate": 2.6485843959368923e-06,
+ "loss": 1.0572,
+ "step": 9216
+ },
+ {
+ "epoch": 9.96,
+ "learning_rate": 2.5794251134644482e-06,
+ "loss": 1.097,
+ "step": 9472
+ },
+ {
+ "epoch": 10.0,
+ "eval_loss": 1.1468651294708252,
+ "eval_runtime": 7.1468,
+ "eval_samples_per_second": 69.962,
+ "eval_steps_per_second": 8.815,
+ "step": 9510
+ },
+ {
+ "epoch": 10.23,
+ "learning_rate": 2.5102658309920038e-06,
+ "loss": 1.0672,
+ "step": 9728
+ },
+ {
+ "epoch": 10.5,
+ "learning_rate": 2.4411065485195593e-06,
+ "loss": 1.0832,
+ "step": 9984
+ },
+ {
+ "epoch": 10.77,
+ "learning_rate": 2.3719472660471148e-06,
+ "loss": 1.0644,
+ "step": 10240
+ },
+ {
+ "epoch": 11.0,
+ "eval_loss": 1.1458113193511963,
+ "eval_runtime": 7.0737,
+ "eval_samples_per_second": 70.684,
+ "eval_steps_per_second": 8.906,
+ "step": 10461
+ },
+ {
+ "epoch": 11.04,
+ "learning_rate": 2.3027879835746707e-06,
+ "loss": 1.0828,
+ "step": 10496
+ },
+ {
+ "epoch": 11.31,
+ "learning_rate": 2.233628701102226e-06,
+ "loss": 1.0609,
+ "step": 10752
+ },
+ {
+ "epoch": 11.58,
+ "learning_rate": 2.1644694186297817e-06,
+ "loss": 1.0648,
+ "step": 11008
+ },
+ {
+ "epoch": 11.84,
+ "learning_rate": 2.0953101361573376e-06,
+ "loss": 1.0587,
+ "step": 11264
+ },
+ {
+ "epoch": 12.0,
+ "eval_loss": 1.146124005317688,
+ "eval_runtime": 7.0057,
+ "eval_samples_per_second": 71.371,
+ "eval_steps_per_second": 8.993,
+ "step": 11412
+ },
+ {
+ "epoch": 12.11,
+ "learning_rate": 2.026150853684893e-06,
+ "loss": 1.0746,
+ "step": 11520
+ },
+ {
+ "epoch": 12.38,
+ "learning_rate": 1.9569915712124487e-06,
+ "loss": 1.0449,
+ "step": 11776
+ },
+ {
+ "epoch": 12.65,
+ "learning_rate": 1.8878322887400044e-06,
+ "loss": 1.0563,
+ "step": 12032
+ },
+ {
+ "epoch": 12.92,
+ "learning_rate": 1.81867300626756e-06,
+ "loss": 1.0635,
+ "step": 12288
+ },
+ {
+ "epoch": 13.0,
+ "eval_loss": 1.1461944580078125,
+ "eval_runtime": 7.0215,
+ "eval_samples_per_second": 71.209,
+ "eval_steps_per_second": 8.972,
+ "step": 12363
+ },
+ {
+ "epoch": 13.19,
+ "learning_rate": 1.7495137237951156e-06,
+ "loss": 1.0407,
+ "step": 12544
+ },
+ {
+ "epoch": 13.46,
+ "learning_rate": 1.6803544413226713e-06,
+ "loss": 1.0555,
+ "step": 12800
+ },
+ {
+ "epoch": 13.73,
+ "learning_rate": 1.611195158850227e-06,
+ "loss": 1.0174,
+ "step": 13056
+ },
+ {
+ "epoch": 14.0,
+ "learning_rate": 1.5420358763777825e-06,
+ "loss": 1.0661,
+ "step": 13312
+ },
+ {
+ "epoch": 14.0,
+ "eval_loss": 1.1455323696136475,
+ "eval_runtime": 7.1221,
+ "eval_samples_per_second": 70.204,
+ "eval_steps_per_second": 8.846,
+ "step": 13314
+ }
+ ],
+ "max_steps": 19020,
+ "num_train_epochs": 20,
+ "total_flos": 2.3059242172416e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
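
trainer_state.json records an evaluation after every epoch: eval_loss falls from about 1.339 at epoch 1 to 1.1455 at epoch 14 (step 13314), which is the checkpoint recorded as best_model_checkpoint, out of a planned 20 epochs (19020 steps). A small sketch of pulling that curve back out of the file:

```python
# Sketch: read trainer_state.json and list the per-epoch eval loss, then check
# that the minimum matches the recorded best_metric / best_model_checkpoint.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for epoch, loss in evals:
    print(f"epoch {epoch:5.1f}  eval_loss {loss:.4f}")

best_epoch, best_loss = min(evals, key=lambda pair: pair[1])
print("best:", best_epoch, best_loss)
print("recorded:", state["best_metric"], state["best_model_checkpoint"])
```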
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db5044347200b22c64a617422cc9b445b0a6e8dbf9fc07addb3937d23bb4c7e6
+ size 3119