agentlans committed on
Commit c4ebb55 · verified · 1 Parent(s): fd1013d

Upload 8 files

README.md CHANGED
@@ -4,19 +4,19 @@ base_model: agentlans/multilingual-e5-small-aligned
  tags:
  - generated_from_trainer
  model-index:
- - name: multilingual-e5-small-aligned-quality
+ - name: multilingual-e5-small-aligned-transformed-quality
  results: []
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # multilingual-e5-small-aligned-quality
+ # multilingual-e5-small-aligned-transformed-quality

  This model is a fine-tuned version of [agentlans/multilingual-e5-small-aligned](https://huggingface.co/agentlans/multilingual-e5-small-aligned) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2512
- - Mse: 0.2512
+ - Loss: 0.2604
+ - Mse: 0.2604

  ## Model description

@@ -36,7 +36,7 @@ More information needed

  The following hyperparameters were used during training:
  - learning_rate: 5e-05
- - train_batch_size: 64
+ - train_batch_size: 32
  - eval_batch_size: 8
  - seed: 42
  - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
@@ -47,9 +47,9 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Mse |
  |:-------------:|:-----:|:-----:|:---------------:|:------:|
- | 0.283 | 1.0 | 13548 | 0.2798 | 0.2798 |
- | 0.2212 | 2.0 | 27096 | 0.2522 | 0.2522 |
- | 0.1801 | 3.0 | 40644 | 0.2512 | 0.2512 |
+ | 0.2949 | 1.0 | 27096 | 0.2873 | 0.2873 |
+ | 0.2239 | 2.0 | 54192 | 0.2671 | 0.2671 |
+ | 0.1789 | 3.0 | 81288 | 0.2604 | 0.2604 |


  ### Framework versions
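For context on the card above: the metrics are MSE only, which suggests a single-score regression fine-tune. A minimal loading sketch follows, assuming the checkpoint is published under the new model-index name and exposes a standard single-output sequence-classification head; neither assumption is confirmed by this diff.

```python
# Minimal usage sketch. Assumptions (not stated in this diff): the checkpoint is
# published as "agentlans/multilingual-e5-small-aligned-transformed-quality" and
# was trained with a single-output regression head (the card only reports MSE).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "agentlans/multilingual-e5-small-aligned-transformed-quality"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)
model.eval()

texts = ["An example sentence to score."]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    scores = model(**batch).logits.squeeze(-1)  # one quality score per input text
print(scores.tolist())
```

If the repository actually ships encoder weights only (embeddings plus an external regressor), `AutoModel` would be the right entry point instead.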
all_results.json CHANGED
@@ -1,15 +1,15 @@
  {
  "epoch": 3.0,
- "eval_loss": 0.2512021064758301,
- "eval_mse": 0.2512021179375455,
- "eval_runtime": 60.7595,
+ "eval_loss": 0.26044028997421265,
+ "eval_mse": 0.2604402849126996,
+ "eval_runtime": 53.5863,
  "eval_samples": 96338,
- "eval_samples_per_second": 1585.562,
- "eval_steps_per_second": 198.208,
+ "eval_samples_per_second": 1797.81,
+ "eval_steps_per_second": 224.74,
  "total_flos": 4.283504864539085e+16,
- "train_loss": 0.23920188012548746,
- "train_runtime": 3164.8098,
+ "train_loss": 0.24568356713046358,
+ "train_runtime": 4418.2249,
  "train_samples": 867042,
- "train_samples_per_second": 821.89,
- "train_steps_per_second": 12.842
+ "train_samples_per_second": 588.726,
+ "train_steps_per_second": 18.398
  }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 3.0,
- "eval_loss": 0.2512021064758301,
- "eval_mse": 0.2512021179375455,
- "eval_runtime": 60.7595,
+ "eval_loss": 0.26044028997421265,
+ "eval_mse": 0.2604402849126996,
+ "eval_runtime": 53.5863,
  "eval_samples": 96338,
- "eval_samples_per_second": 1585.562,
- "eval_steps_per_second": 198.208
+ "eval_samples_per_second": 1797.81,
+ "eval_steps_per_second": 224.74
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:725a4a8c077941f3be577551f89cb0871f1441cde05ae499be76d82f4899bd7c
+ oid sha256:c4f0eb3cb08c59ac8a80384938cb076a85795887de939c5f213c14fdd935883f
  size 470640124
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 3.0,
  "total_flos": 4.283504864539085e+16,
- "train_loss": 0.23920188012548746,
- "train_runtime": 3164.8098,
+ "train_loss": 0.24568356713046358,
+ "train_runtime": 4418.2249,
  "train_samples": 867042,
- "train_samples_per_second": 821.89,
- "train_steps_per_second": 12.842
+ "train_samples_per_second": 588.726,
+ "train_steps_per_second": 18.398
  }
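The updated figures are consistent with the batch-size change in the README diff (64 to 32): halving the per-device batch size doubles the optimizer steps per epoch, which is why the step counts in trainer_state.json below grow from 40644 to 81288. A quick check using only numbers taken from the diffs above:

```python
# Consistency check of the updated stats, using only numbers that appear in the
# diffs above. The ceil() assumes the last partial batch of each epoch is kept
# (the usual Trainer default), which is an assumption on my part.
import math

train_samples, epochs, batch_size = 867_042, 3, 32
steps_per_epoch = math.ceil(train_samples / batch_size)  # 27096
total_steps = steps_per_epoch * epochs                   # 81288 = reported global_step / max_steps

train_runtime = 4418.2249
print(total_steps)                                       # 81288
print(round(train_runtime * 588.726))                    # ~2601124, close to 867042 * 3 = 2601126 samples processed
print(round(train_runtime * 18.398))                     # ~81287, close to the 81288 optimizer steps
```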
trainer_state.json CHANGED
@@ -1,619 +1,1186 @@
  {
- "best_metric": 0.2512021064758301,
- "best_model_checkpoint": "multilingual-e5-small-aligned-quality/checkpoint-40644",
  "epoch": 3.0,
  "eval_steps": 500,
- "global_step": 40644,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
 
 
 
 
 
 
 
11
  {
12
  "epoch": 0.03690581635665781,
13
- "grad_norm": 5.902834892272949,
14
  "learning_rate": 4.938490306072237e-05,
15
- "loss": 0.4031,
16
- "step": 500
 
 
 
 
 
 
 
17
  },
18
  {
19
  "epoch": 0.07381163271331562,
20
- "grad_norm": 2.7090084552764893,
21
  "learning_rate": 4.876980612144474e-05,
22
- "loss": 0.3469,
23
- "step": 1000
 
 
 
 
 
 
 
24
  },
25
  {
26
  "epoch": 0.11071744906997343,
27
- "grad_norm": 2.5691611766815186,
28
  "learning_rate": 4.815470918216711e-05,
29
- "loss": 0.3437,
30
- "step": 1500
 
 
 
 
 
 
 
31
  },
32
  {
33
  "epoch": 0.14762326542663123,
34
- "grad_norm": 2.3187239170074463,
35
  "learning_rate": 4.7539612242889484e-05,
36
- "loss": 0.3309,
37
- "step": 2000
 
 
 
 
 
 
 
38
  },
39
  {
40
  "epoch": 0.18452908178328906,
41
- "grad_norm": 2.1093502044677734,
42
  "learning_rate": 4.692451530361185e-05,
43
- "loss": 0.3247,
44
- "step": 2500
 
 
 
 
 
 
 
45
  },
46
  {
47
  "epoch": 0.22143489813994685,
48
- "grad_norm": 1.9845925569534302,
49
  "learning_rate": 4.6309418364334224e-05,
50
- "loss": 0.3169,
51
- "step": 3000
 
 
 
 
 
 
 
52
  },
53
  {
54
  "epoch": 0.2583407144966047,
55
- "grad_norm": 2.292973756790161,
56
  "learning_rate": 4.5694321425056594e-05,
57
- "loss": 0.3197,
58
- "step": 3500
 
 
 
 
 
 
 
59
  },
60
  {
61
  "epoch": 0.29524653085326247,
62
- "grad_norm": 1.499457597732544,
63
  "learning_rate": 4.507922448577896e-05,
64
- "loss": 0.3142,
65
- "step": 4000
 
 
 
 
 
 
 
66
  },
67
  {
68
  "epoch": 0.33215234720992026,
69
- "grad_norm": 1.7365167140960693,
70
  "learning_rate": 4.4464127546501335e-05,
71
- "loss": 0.3138,
72
- "step": 4500
 
 
 
 
 
 
 
73
  },
74
  {
75
  "epoch": 0.3690581635665781,
76
- "grad_norm": 2.04133939743042,
77
  "learning_rate": 4.38490306072237e-05,
78
- "loss": 0.3132,
79
- "step": 5000
 
 
 
 
 
 
 
80
  },
81
  {
82
  "epoch": 0.4059639799232359,
83
- "grad_norm": 1.8704568147659302,
84
  "learning_rate": 4.323393366794607e-05,
85
- "loss": 0.3104,
86
- "step": 5500
 
 
 
 
 
 
 
87
  },
88
  {
89
  "epoch": 0.4428697962798937,
90
- "grad_norm": 2.452059268951416,
91
  "learning_rate": 4.261883672866844e-05,
92
- "loss": 0.3046,
93
- "step": 6000
 
 
 
 
 
 
 
94
  },
95
  {
96
  "epoch": 0.4797756126365515,
97
- "grad_norm": 2.5406882762908936,
98
  "learning_rate": 4.200373978939081e-05,
99
- "loss": 0.2989,
100
- "step": 6500
 
 
 
 
 
 
 
101
  },
102
  {
103
  "epoch": 0.5166814289932093,
104
- "grad_norm": 1.574673056602478,
105
  "learning_rate": 4.138864285011318e-05,
106
- "loss": 0.3005,
107
- "step": 7000
 
 
 
 
 
 
 
108
  },
109
  {
110
  "epoch": 0.5535872453498671,
111
- "grad_norm": 2.1640470027923584,
112
  "learning_rate": 4.077354591083555e-05,
113
- "loss": 0.3013,
114
- "step": 7500
 
 
 
 
 
 
 
115
  },
116
  {
117
  "epoch": 0.5904930617065249,
118
- "grad_norm": 2.8707878589630127,
119
  "learning_rate": 4.015844897155792e-05,
120
- "loss": 0.2981,
121
- "step": 8000
 
 
 
 
 
 
 
122
  },
123
  {
124
  "epoch": 0.6273988780631827,
125
- "grad_norm": 2.8684544563293457,
126
  "learning_rate": 3.954335203228029e-05,
127
- "loss": 0.2979,
128
- "step": 8500
 
 
 
 
 
 
 
129
  },
130
  {
131
  "epoch": 0.6643046944198405,
132
- "grad_norm": 2.2373464107513428,
133
  "learning_rate": 3.892825509300266e-05,
134
- "loss": 0.288,
135
- "step": 9000
 
 
 
 
 
 
 
136
  },
137
  {
138
  "epoch": 0.7012105107764984,
139
- "grad_norm": 2.0412282943725586,
140
  "learning_rate": 3.8313158153725024e-05,
141
- "loss": 0.292,
142
- "step": 9500
 
 
 
 
 
 
 
143
  },
144
  {
145
  "epoch": 0.7381163271331562,
146
- "grad_norm": 2.2188100814819336,
147
  "learning_rate": 3.76980612144474e-05,
148
- "loss": 0.2909,
149
- "step": 10000
 
 
 
 
 
 
 
150
  },
151
  {
152
  "epoch": 0.775022143489814,
153
- "grad_norm": 1.4839853048324585,
154
  "learning_rate": 3.708296427516977e-05,
155
- "loss": 0.2851,
156
- "step": 10500
 
 
 
 
 
 
 
157
  },
158
  {
159
  "epoch": 0.8119279598464718,
160
- "grad_norm": 1.4828788042068481,
161
  "learning_rate": 3.6467867335892135e-05,
162
- "loss": 0.2894,
163
- "step": 11000
 
 
 
 
 
 
 
164
  },
165
  {
166
  "epoch": 0.8488337762031296,
167
- "grad_norm": 1.8619405031204224,
168
  "learning_rate": 3.585277039661451e-05,
169
- "loss": 0.2885,
170
- "step": 11500
 
 
 
 
 
 
 
171
  },
172
  {
173
  "epoch": 0.8857395925597874,
174
- "grad_norm": 1.9477214813232422,
175
  "learning_rate": 3.5237673457336876e-05,
176
- "loss": 0.2813,
177
- "step": 12000
 
 
 
 
 
 
 
178
  },
179
  {
180
  "epoch": 0.9226454089164452,
181
- "grad_norm": 2.7381999492645264,
182
  "learning_rate": 3.4622576518059246e-05,
183
- "loss": 0.284,
184
- "step": 12500
 
 
 
 
 
 
 
185
  },
186
  {
187
  "epoch": 0.959551225273103,
188
- "grad_norm": 1.8171463012695312,
189
  "learning_rate": 3.400747957878162e-05,
190
- "loss": 0.2811,
191
- "step": 13000
 
 
 
 
 
 
 
192
  },
193
  {
194
  "epoch": 0.9964570416297609,
195
- "grad_norm": 2.59826922416687,
196
  "learning_rate": 3.3392382639503986e-05,
197
- "loss": 0.283,
198
- "step": 13500
199
  },
200
  {
201
  "epoch": 1.0,
202
- "eval_loss": 0.27975523471832275,
203
- "eval_mse": 0.27975526814178425,
204
- "eval_runtime": 52.0159,
205
- "eval_samples_per_second": 1852.088,
206
- "eval_steps_per_second": 231.525,
207
- "step": 13548
 
 
 
 
 
 
 
208
  },
209
  {
210
  "epoch": 1.0333628579864187,
211
- "grad_norm": 3.655153751373291,
212
  "learning_rate": 3.277728570022636e-05,
213
- "loss": 0.236,
214
- "step": 14000
 
 
 
 
 
 
 
215
  },
216
  {
217
  "epoch": 1.0702686743430765,
218
- "grad_norm": 2.276049852371216,
219
  "learning_rate": 3.216218876094873e-05,
220
- "loss": 0.2364,
221
- "step": 14500
 
 
 
 
 
 
 
222
  },
223
  {
224
  "epoch": 1.1071744906997343,
225
- "grad_norm": 1.4967354536056519,
226
  "learning_rate": 3.15470918216711e-05,
227
- "loss": 0.2317,
228
- "step": 15000
 
 
 
 
 
 
 
229
  },
230
  {
231
  "epoch": 1.144080307056392,
232
- "grad_norm": 1.4636516571044922,
233
  "learning_rate": 3.093199488239347e-05,
234
- "loss": 0.2342,
235
- "step": 15500
 
 
 
 
 
 
 
236
  },
237
  {
238
  "epoch": 1.1809861234130499,
239
- "grad_norm": 2.246140956878662,
240
  "learning_rate": 3.0316897943115834e-05,
241
- "loss": 0.2288,
242
- "step": 16000
 
 
 
 
 
 
 
243
  },
244
  {
245
  "epoch": 1.2178919397697077,
246
- "grad_norm": 1.4207803010940552,
247
  "learning_rate": 2.9701801003838208e-05,
248
- "loss": 0.2302,
249
- "step": 16500
 
 
 
 
 
 
 
250
  },
251
  {
252
  "epoch": 1.2547977561263655,
253
- "grad_norm": 2.0020480155944824,
254
  "learning_rate": 2.9086704064560578e-05,
255
- "loss": 0.2331,
256
- "step": 17000
 
 
 
 
 
 
 
257
  },
258
  {
259
  "epoch": 1.2917035724830233,
260
- "grad_norm": 1.7502425909042358,
261
  "learning_rate": 2.8471607125282945e-05,
262
- "loss": 0.2296,
263
- "step": 17500
 
 
 
 
 
 
 
264
  },
265
  {
266
  "epoch": 1.328609388839681,
267
- "grad_norm": 1.819958209991455,
268
  "learning_rate": 2.7856510186005312e-05,
269
- "loss": 0.2346,
270
- "step": 18000
 
 
 
 
 
 
 
271
  },
272
  {
273
  "epoch": 1.3655152051963388,
274
- "grad_norm": 2.5178093910217285,
275
  "learning_rate": 2.7241413246727686e-05,
276
- "loss": 0.2291,
277
- "step": 18500
 
 
 
 
 
 
 
278
  },
279
  {
280
  "epoch": 1.4024210215529966,
281
- "grad_norm": 1.7607210874557495,
282
  "learning_rate": 2.6626316307450056e-05,
283
- "loss": 0.2266,
284
- "step": 19000
 
 
 
 
 
 
 
285
  },
286
  {
287
  "epoch": 1.4393268379096544,
288
- "grad_norm": 2.5194263458251953,
289
  "learning_rate": 2.6011219368172423e-05,
290
- "loss": 0.2292,
291
- "step": 19500
 
 
 
 
 
 
 
292
  },
293
  {
294
  "epoch": 1.4762326542663124,
295
- "grad_norm": 1.6286081075668335,
296
  "learning_rate": 2.5396122428894797e-05,
297
- "loss": 0.2298,
298
- "step": 20000
 
 
 
 
 
 
 
299
  },
300
  {
301
  "epoch": 1.51313847062297,
302
- "grad_norm": 2.4123353958129883,
303
  "learning_rate": 2.4781025489617167e-05,
304
- "loss": 0.2283,
305
- "step": 20500
 
 
 
 
 
 
 
306
  },
307
  {
308
  "epoch": 1.550044286979628,
309
- "grad_norm": 1.9285629987716675,
310
  "learning_rate": 2.4165928550339534e-05,
311
- "loss": 0.2263,
312
- "step": 21000
 
 
 
 
 
 
 
313
  },
314
  {
315
  "epoch": 1.5869501033362858,
316
- "grad_norm": 2.4015371799468994,
317
  "learning_rate": 2.3550831611061904e-05,
318
- "loss": 0.2265,
319
- "step": 21500
 
 
 
 
 
 
 
320
  },
321
  {
322
  "epoch": 1.6238559196929436,
323
- "grad_norm": 1.3897498846054077,
324
  "learning_rate": 2.2935734671784274e-05,
325
- "loss": 0.2279,
326
- "step": 22000
 
 
 
 
 
 
 
327
  },
328
  {
329
  "epoch": 1.6607617360496014,
330
- "grad_norm": 1.909913182258606,
331
  "learning_rate": 2.2320637732506645e-05,
332
- "loss": 0.2235,
333
- "step": 22500
 
 
 
 
 
 
 
334
  },
335
  {
336
  "epoch": 1.6976675524062592,
337
- "grad_norm": 2.007033586502075,
338
  "learning_rate": 2.1705540793229015e-05,
339
- "loss": 0.2226,
340
- "step": 23000
 
 
 
 
 
 
 
341
  },
342
  {
343
  "epoch": 1.734573368762917,
344
- "grad_norm": 1.4410597085952759,
345
  "learning_rate": 2.1090443853951382e-05,
346
- "loss": 0.2237,
347
- "step": 23500
 
 
 
 
 
 
 
348
  },
349
  {
350
  "epoch": 1.7714791851195748,
351
- "grad_norm": 1.568517804145813,
352
  "learning_rate": 2.0475346914673755e-05,
353
- "loss": 0.2236,
354
- "step": 24000
 
 
 
 
 
 
 
355
  },
356
  {
357
  "epoch": 1.8083850014762326,
358
- "grad_norm": 1.9290361404418945,
359
  "learning_rate": 1.9860249975396122e-05,
360
- "loss": 0.2245,
361
- "step": 24500
 
 
 
 
 
 
 
362
  },
363
  {
364
  "epoch": 1.8452908178328906,
365
- "grad_norm": 1.7693965435028076,
366
  "learning_rate": 1.9245153036118493e-05,
367
- "loss": 0.2219,
368
- "step": 25000
 
 
 
 
 
 
 
369
  },
370
  {
371
  "epoch": 1.8821966341895484,
372
- "grad_norm": 1.637657642364502,
373
  "learning_rate": 1.8630056096840863e-05,
374
- "loss": 0.2222,
375
- "step": 25500
 
 
 
 
 
 
 
376
  },
377
  {
378
  "epoch": 1.9191024505462062,
379
- "grad_norm": 1.8758279085159302,
380
  "learning_rate": 1.8014959157563233e-05,
381
- "loss": 0.2237,
382
- "step": 26000
 
 
 
 
 
 
 
383
  },
384
  {
385
  "epoch": 1.956008266902864,
386
- "grad_norm": 1.9924167394638062,
387
  "learning_rate": 1.7399862218285603e-05,
388
- "loss": 0.2221,
389
- "step": 26500
 
 
 
 
 
 
 
390
  },
391
  {
392
  "epoch": 1.9929140832595218,
393
- "grad_norm": 1.6823917627334595,
394
  "learning_rate": 1.678476527900797e-05,
395
- "loss": 0.2212,
396
- "step": 27000
397
  },
398
  {
399
  "epoch": 2.0,
400
- "eval_loss": 0.2521688938140869,
401
- "eval_mse": 0.2521688884473344,
402
- "eval_runtime": 52.0006,
403
- "eval_samples_per_second": 1852.634,
404
- "eval_steps_per_second": 231.594,
405
- "step": 27096
 
 
 
 
 
 
 
406
  },
407
  {
408
  "epoch": 2.0298198996161796,
409
- "grad_norm": 1.8799372911453247,
410
  "learning_rate": 1.6169668339730344e-05,
411
- "loss": 0.1923,
412
- "step": 27500
 
 
 
 
 
 
 
413
  },
414
  {
415
  "epoch": 2.0667257159728374,
416
- "grad_norm": 1.8323724269866943,
417
  "learning_rate": 1.555457140045271e-05,
418
- "loss": 0.1843,
419
- "step": 28000
 
 
 
 
 
 
 
420
  },
421
  {
422
  "epoch": 2.103631532329495,
423
- "grad_norm": 2.1051783561706543,
424
  "learning_rate": 1.4939474461175081e-05,
425
- "loss": 0.1828,
426
- "step": 28500
 
 
 
 
 
 
 
427
  },
428
  {
429
  "epoch": 2.140537348686153,
430
- "grad_norm": 1.5870431661605835,
431
  "learning_rate": 1.4324377521897453e-05,
432
- "loss": 0.1871,
433
- "step": 29000
 
 
 
 
 
 
 
434
  },
435
  {
436
  "epoch": 2.1774431650428108,
437
- "grad_norm": 1.8576958179473877,
438
  "learning_rate": 1.3709280582619822e-05,
439
- "loss": 0.1845,
440
- "step": 29500
 
 
 
 
 
 
 
441
  },
442
  {
443
  "epoch": 2.2143489813994686,
444
- "grad_norm": 1.5509694814682007,
445
  "learning_rate": 1.3094183643342192e-05,
446
- "loss": 0.1838,
447
- "step": 30000
 
 
 
 
 
 
 
448
  },
449
  {
450
  "epoch": 2.2512547977561264,
451
- "grad_norm": 1.8506149053573608,
452
  "learning_rate": 1.2479086704064562e-05,
453
- "loss": 0.1849,
454
- "step": 30500
 
 
 
 
 
 
 
455
  },
456
  {
457
  "epoch": 2.288160614112784,
458
- "grad_norm": 1.8075580596923828,
459
  "learning_rate": 1.186398976478693e-05,
460
- "loss": 0.1858,
461
- "step": 31000
 
 
 
 
 
 
 
462
  },
463
  {
464
  "epoch": 2.325066430469442,
465
- "grad_norm": 2.2976126670837402,
466
  "learning_rate": 1.1248892825509301e-05,
467
- "loss": 0.187,
468
- "step": 31500
 
 
 
 
 
 
 
469
  },
470
  {
471
  "epoch": 2.3619722468260997,
472
- "grad_norm": 2.127387046813965,
473
  "learning_rate": 1.0633795886231671e-05,
474
- "loss": 0.1851,
475
- "step": 32000
 
 
 
 
 
 
 
476
  },
477
  {
478
  "epoch": 2.3988780631827575,
479
- "grad_norm": 1.7915741205215454,
480
  "learning_rate": 1.001869894695404e-05,
481
- "loss": 0.1813,
482
- "step": 32500
 
 
 
 
 
 
 
483
  },
484
  {
485
  "epoch": 2.4357838795394153,
486
- "grad_norm": 2.1885006427764893,
487
  "learning_rate": 9.40360200767641e-06,
488
- "loss": 0.1807,
489
- "step": 33000
 
 
 
 
 
 
 
490
  },
491
  {
492
  "epoch": 2.472689695896073,
493
- "grad_norm": 2.7843916416168213,
494
  "learning_rate": 8.78850506839878e-06,
495
- "loss": 0.1839,
496
- "step": 33500
 
 
 
 
 
 
 
497
  },
498
  {
499
  "epoch": 2.509595512252731,
500
- "grad_norm": 1.519360899925232,
501
  "learning_rate": 8.17340812912115e-06,
502
- "loss": 0.1846,
503
- "step": 34000
 
 
 
 
 
 
 
504
  },
505
  {
506
  "epoch": 2.5465013286093887,
507
- "grad_norm": 1.867719292640686,
508
  "learning_rate": 7.55831118984352e-06,
509
- "loss": 0.1843,
510
- "step": 34500
 
 
 
 
 
 
 
511
  },
512
  {
513
  "epoch": 2.5834071449660465,
514
- "grad_norm": 1.8827580213546753,
515
  "learning_rate": 6.94321425056589e-06,
516
- "loss": 0.182,
517
- "step": 35000
 
 
 
 
 
 
 
518
  },
519
  {
520
  "epoch": 2.6203129613227043,
521
- "grad_norm": 2.268225908279419,
522
  "learning_rate": 6.328117311288259e-06,
523
- "loss": 0.1817,
524
- "step": 35500
 
 
 
 
 
 
 
525
  },
526
  {
527
  "epoch": 2.657218777679362,
528
- "grad_norm": 1.7755805253982544,
529
  "learning_rate": 5.713020372010629e-06,
 
 
 
 
 
 
 
530
  "loss": 0.1821,
531
- "step": 36000
532
  },
533
  {
534
  "epoch": 2.69412459403602,
535
- "grad_norm": 1.9568016529083252,
536
  "learning_rate": 5.097923432732999e-06,
537
- "loss": 0.18,
538
- "step": 36500
 
 
 
 
 
 
 
539
  },
540
  {
541
  "epoch": 2.7310304103926777,
542
- "grad_norm": 2.343839406967163,
543
  "learning_rate": 4.482826493455368e-06,
544
- "loss": 0.181,
545
- "step": 37000
 
 
 
 
 
 
 
546
  },
547
  {
548
  "epoch": 2.7679362267493355,
549
- "grad_norm": 2.2050397396087646,
550
  "learning_rate": 3.8677295541777385e-06,
551
- "loss": 0.1817,
552
- "step": 37500
 
 
 
 
 
 
 
553
  },
554
  {
555
  "epoch": 2.8048420431059933,
556
- "grad_norm": 1.7823182344436646,
557
  "learning_rate": 3.2526326149001084e-06,
558
- "loss": 0.1779,
559
- "step": 38000
 
 
 
 
 
 
 
560
  },
561
  {
562
  "epoch": 2.841747859462651,
563
- "grad_norm": 1.8498305082321167,
564
  "learning_rate": 2.6375356756224782e-06,
565
- "loss": 0.1819,
566
- "step": 38500
 
 
 
 
 
 
 
567
  },
568
  {
569
  "epoch": 2.878653675819309,
570
- "grad_norm": 2.2064967155456543,
571
  "learning_rate": 2.022438736344848e-06,
572
- "loss": 0.182,
573
- "step": 39000
 
 
 
 
 
 
 
574
  },
575
  {
576
  "epoch": 2.9155594921759667,
577
- "grad_norm": 2.3844711780548096,
578
  "learning_rate": 1.4073417970672177e-06,
579
- "loss": 0.1786,
580
- "step": 39500
 
 
 
 
 
 
 
581
  },
582
  {
583
  "epoch": 2.952465308532625,
584
- "grad_norm": 1.8031284809112549,
585
  "learning_rate": 7.922448577895876e-07,
586
- "loss": 0.1815,
587
- "step": 40000
 
 
 
 
 
 
 
588
  },
589
  {
590
  "epoch": 2.9893711248892827,
591
- "grad_norm": 1.6803677082061768,
592
  "learning_rate": 1.771479185119575e-07,
593
- "loss": 0.1801,
594
- "step": 40500
  },
  {
  "epoch": 3.0,
- "eval_loss": 0.2512021064758301,
- "eval_mse": 0.2512021179375455,
- "eval_runtime": 59.0402,
- "eval_samples_per_second": 1631.736,
- "eval_steps_per_second": 203.98,
- "step": 40644
  },
  {
  "epoch": 3.0,
- "step": 40644,
  "total_flos": 4.283504864539085e+16,
- "train_loss": 0.23920188012548746,
- "train_runtime": 3164.8098,
- "train_samples_per_second": 821.89,
- "train_steps_per_second": 12.842
  }
  ],
  "logging_steps": 500,
- "max_steps": 40644,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
@@ -630,7 +1197,7 @@
  }
  },
  "total_flos": 4.283504864539085e+16,
- "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
  }
 
  {
+ "best_metric": 0.26044028997421265,
+ "best_model_checkpoint": "multilingual-e5-small-aligned-transformed-quality/checkpoint-81288",
  "epoch": 3.0,
  "eval_steps": 500,
+ "global_step": 81288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
11
+ {
12
+ "epoch": 0.018452908178328904,
13
+ "grad_norm": 1.5689656734466553,
14
+ "learning_rate": 4.969245153036119e-05,
15
+ "loss": 0.4394,
16
+ "step": 500
17
+ },
18
  {
19
  "epoch": 0.03690581635665781,
20
+ "grad_norm": 3.1030237674713135,
21
  "learning_rate": 4.938490306072237e-05,
22
+ "loss": 0.3911,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 0.05535872453498671,
27
+ "grad_norm": 1.7122437953948975,
28
+ "learning_rate": 4.907735459108356e-05,
29
+ "loss": 0.3661,
30
+ "step": 1500
31
  },
32
  {
33
  "epoch": 0.07381163271331562,
34
+ "grad_norm": 4.44265079498291,
35
  "learning_rate": 4.876980612144474e-05,
36
+ "loss": 0.3665,
37
+ "step": 2000
38
+ },
39
+ {
40
+ "epoch": 0.09226454089164453,
41
+ "grad_norm": 3.232088088989258,
42
+ "learning_rate": 4.846225765180593e-05,
43
+ "loss": 0.3581,
44
+ "step": 2500
45
  },
46
  {
47
  "epoch": 0.11071744906997343,
48
+ "grad_norm": 4.422216415405273,
49
  "learning_rate": 4.815470918216711e-05,
50
+ "loss": 0.3606,
51
+ "step": 3000
52
+ },
53
+ {
54
+ "epoch": 0.12917035724830234,
55
+ "grad_norm": 2.604503631591797,
56
+ "learning_rate": 4.78471607125283e-05,
57
+ "loss": 0.3492,
58
+ "step": 3500
59
  },
60
  {
61
  "epoch": 0.14762326542663123,
62
+ "grad_norm": 1.7900758981704712,
63
  "learning_rate": 4.7539612242889484e-05,
64
+ "loss": 0.346,
65
+ "step": 4000
66
+ },
67
+ {
68
+ "epoch": 0.16607617360496013,
69
+ "grad_norm": 2.164018154144287,
70
+ "learning_rate": 4.723206377325067e-05,
71
+ "loss": 0.3405,
72
+ "step": 4500
73
  },
74
  {
75
  "epoch": 0.18452908178328906,
76
+ "grad_norm": 3.678737163543701,
77
  "learning_rate": 4.692451530361185e-05,
78
+ "loss": 0.342,
79
+ "step": 5000
80
+ },
81
+ {
82
+ "epoch": 0.20298198996161795,
83
+ "grad_norm": 3.0846383571624756,
84
+ "learning_rate": 4.661696683397304e-05,
85
+ "loss": 0.3323,
86
+ "step": 5500
87
  },
88
  {
89
  "epoch": 0.22143489813994685,
90
+ "grad_norm": 2.493079900741577,
91
  "learning_rate": 4.6309418364334224e-05,
92
+ "loss": 0.3403,
93
+ "step": 6000
94
+ },
95
+ {
96
+ "epoch": 0.23988780631827575,
97
+ "grad_norm": 3.3876466751098633,
98
+ "learning_rate": 4.60018698946954e-05,
99
+ "loss": 0.3398,
100
+ "step": 6500
101
  },
102
  {
103
  "epoch": 0.2583407144966047,
104
+ "grad_norm": 2.3498599529266357,
105
  "learning_rate": 4.5694321425056594e-05,
106
+ "loss": 0.3285,
107
+ "step": 7000
108
+ },
109
+ {
110
+ "epoch": 0.27679362267493357,
111
+ "grad_norm": 2.9931955337524414,
112
+ "learning_rate": 4.538677295541778e-05,
113
+ "loss": 0.3315,
114
+ "step": 7500
115
  },
116
  {
117
  "epoch": 0.29524653085326247,
118
+ "grad_norm": 2.410051107406616,
119
  "learning_rate": 4.507922448577896e-05,
120
+ "loss": 0.3245,
121
+ "step": 8000
122
+ },
123
+ {
124
+ "epoch": 0.31369943903159137,
125
+ "grad_norm": 2.8106865882873535,
126
+ "learning_rate": 4.477167601614014e-05,
127
+ "loss": 0.3357,
128
+ "step": 8500
129
  },
130
  {
131
  "epoch": 0.33215234720992026,
132
+ "grad_norm": 1.4903161525726318,
133
  "learning_rate": 4.4464127546501335e-05,
134
+ "loss": 0.3267,
135
+ "step": 9000
136
+ },
137
+ {
138
+ "epoch": 0.3506052553882492,
139
+ "grad_norm": 2.8870410919189453,
140
+ "learning_rate": 4.415657907686251e-05,
141
+ "loss": 0.3276,
142
+ "step": 9500
143
  },
144
  {
145
  "epoch": 0.3690581635665781,
146
+ "grad_norm": 3.0932767391204834,
147
  "learning_rate": 4.38490306072237e-05,
148
+ "loss": 0.3287,
149
+ "step": 10000
150
+ },
151
+ {
152
+ "epoch": 0.387511071744907,
153
+ "grad_norm": 2.0650179386138916,
154
+ "learning_rate": 4.354148213758489e-05,
155
+ "loss": 0.3242,
156
+ "step": 10500
157
  },
158
  {
159
  "epoch": 0.4059639799232359,
160
+ "grad_norm": 2.1519172191619873,
161
  "learning_rate": 4.323393366794607e-05,
162
+ "loss": 0.3227,
163
+ "step": 11000
164
+ },
165
+ {
166
+ "epoch": 0.4244168881015648,
167
+ "grad_norm": 2.222637176513672,
168
+ "learning_rate": 4.2926385198307254e-05,
169
+ "loss": 0.3231,
170
+ "step": 11500
171
  },
172
  {
173
  "epoch": 0.4428697962798937,
174
+ "grad_norm": 3.189814805984497,
175
  "learning_rate": 4.261883672866844e-05,
176
+ "loss": 0.3179,
177
+ "step": 12000
178
+ },
179
+ {
180
+ "epoch": 0.4613227044582226,
181
+ "grad_norm": 2.6109983921051025,
182
+ "learning_rate": 4.2311288259029624e-05,
183
+ "loss": 0.3179,
184
+ "step": 12500
185
  },
186
  {
187
  "epoch": 0.4797756126365515,
188
+ "grad_norm": 2.1001720428466797,
189
  "learning_rate": 4.200373978939081e-05,
190
+ "loss": 0.3156,
191
+ "step": 13000
192
+ },
193
+ {
194
+ "epoch": 0.49822852081488045,
195
+ "grad_norm": 1.8664593696594238,
196
+ "learning_rate": 4.1696191319751994e-05,
197
+ "loss": 0.3135,
198
+ "step": 13500
199
  },
200
  {
201
  "epoch": 0.5166814289932093,
202
+ "grad_norm": 2.5857434272766113,
203
  "learning_rate": 4.138864285011318e-05,
204
+ "loss": 0.3129,
205
+ "step": 14000
206
+ },
207
+ {
208
+ "epoch": 0.5351343371715382,
209
+ "grad_norm": 1.891746997833252,
210
+ "learning_rate": 4.1081094380474365e-05,
211
+ "loss": 0.3134,
212
+ "step": 14500
213
  },
214
  {
215
  "epoch": 0.5535872453498671,
216
+ "grad_norm": 2.4689295291900635,
217
  "learning_rate": 4.077354591083555e-05,
218
+ "loss": 0.3186,
219
+ "step": 15000
220
+ },
221
+ {
222
+ "epoch": 0.572040153528196,
223
+ "grad_norm": 2.3689334392547607,
224
+ "learning_rate": 4.0465997441196735e-05,
225
+ "loss": 0.3109,
226
+ "step": 15500
227
  },
228
  {
229
  "epoch": 0.5904930617065249,
230
+ "grad_norm": 3.297631025314331,
231
  "learning_rate": 4.015844897155792e-05,
232
+ "loss": 0.3115,
233
+ "step": 16000
234
+ },
235
+ {
236
+ "epoch": 0.6089459698848538,
237
+ "grad_norm": 3.3874332904815674,
238
+ "learning_rate": 3.9850900501919105e-05,
239
+ "loss": 0.312,
240
+ "step": 16500
241
  },
242
  {
243
  "epoch": 0.6273988780631827,
244
+ "grad_norm": 2.9138197898864746,
245
  "learning_rate": 3.954335203228029e-05,
246
+ "loss": 0.3126,
247
+ "step": 17000
248
+ },
249
+ {
250
+ "epoch": 0.6458517862415116,
251
+ "grad_norm": 2.6313183307647705,
252
+ "learning_rate": 3.9235803562641475e-05,
253
+ "loss": 0.3013,
254
+ "step": 17500
255
  },
256
  {
257
  "epoch": 0.6643046944198405,
258
+ "grad_norm": 2.1537322998046875,
259
  "learning_rate": 3.892825509300266e-05,
260
+ "loss": 0.3045,
261
+ "step": 18000
262
+ },
263
+ {
264
+ "epoch": 0.6827576025981694,
265
+ "grad_norm": 2.0563058853149414,
266
+ "learning_rate": 3.8620706623363846e-05,
267
+ "loss": 0.3096,
268
+ "step": 18500
269
  },
270
  {
271
  "epoch": 0.7012105107764984,
272
+ "grad_norm": 3.172734498977661,
273
  "learning_rate": 3.8313158153725024e-05,
274
+ "loss": 0.3037,
275
+ "step": 19000
276
+ },
277
+ {
278
+ "epoch": 0.7196634189548273,
279
+ "grad_norm": 2.6458899974823,
280
+ "learning_rate": 3.8005609684086216e-05,
281
+ "loss": 0.3086,
282
+ "step": 19500
283
  },
284
  {
285
  "epoch": 0.7381163271331562,
286
+ "grad_norm": 3.355177879333496,
287
  "learning_rate": 3.76980612144474e-05,
288
+ "loss": 0.3002,
289
+ "step": 20000
290
+ },
291
+ {
292
+ "epoch": 0.7565692353114851,
293
+ "grad_norm": 2.290302038192749,
294
+ "learning_rate": 3.739051274480858e-05,
295
+ "loss": 0.3031,
296
+ "step": 20500
297
  },
298
  {
299
  "epoch": 0.775022143489814,
300
+ "grad_norm": 2.995771884918213,
301
  "learning_rate": 3.708296427516977e-05,
302
+ "loss": 0.2953,
303
+ "step": 21000
304
+ },
305
+ {
306
+ "epoch": 0.7934750516681429,
307
+ "grad_norm": 2.9768190383911133,
308
+ "learning_rate": 3.6775415805530957e-05,
309
+ "loss": 0.3091,
310
+ "step": 21500
311
  },
312
  {
313
  "epoch": 0.8119279598464718,
314
+ "grad_norm": 2.7021236419677734,
315
  "learning_rate": 3.6467867335892135e-05,
316
+ "loss": 0.292,
317
+ "step": 22000
318
+ },
319
+ {
320
+ "epoch": 0.8303808680248007,
321
+ "grad_norm": 4.4983229637146,
322
+ "learning_rate": 3.616031886625332e-05,
323
+ "loss": 0.3024,
324
+ "step": 22500
325
  },
326
  {
327
  "epoch": 0.8488337762031296,
328
+ "grad_norm": 2.2046918869018555,
329
  "learning_rate": 3.585277039661451e-05,
330
+ "loss": 0.3031,
331
+ "step": 23000
332
+ },
333
+ {
334
+ "epoch": 0.8672866843814585,
335
+ "grad_norm": 3.147702932357788,
336
+ "learning_rate": 3.554522192697569e-05,
337
+ "loss": 0.2923,
338
+ "step": 23500
339
  },
340
  {
341
  "epoch": 0.8857395925597874,
342
+ "grad_norm": 2.4293417930603027,
343
  "learning_rate": 3.5237673457336876e-05,
344
+ "loss": 0.2962,
345
+ "step": 24000
346
+ },
347
+ {
348
+ "epoch": 0.9041925007381163,
349
+ "grad_norm": 1.7400178909301758,
350
+ "learning_rate": 3.493012498769807e-05,
351
+ "loss": 0.3017,
352
+ "step": 24500
353
  },
354
  {
355
  "epoch": 0.9226454089164452,
356
+ "grad_norm": 2.229633331298828,
357
  "learning_rate": 3.4622576518059246e-05,
358
+ "loss": 0.2961,
359
+ "step": 25000
360
+ },
361
+ {
362
+ "epoch": 0.9410983170947741,
363
+ "grad_norm": 2.479665517807007,
364
+ "learning_rate": 3.431502804842043e-05,
365
+ "loss": 0.2961,
366
+ "step": 25500
367
  },
368
  {
369
  "epoch": 0.959551225273103,
370
+ "grad_norm": 2.990281820297241,
371
  "learning_rate": 3.400747957878162e-05,
372
+ "loss": 0.2932,
373
+ "step": 26000
374
+ },
375
+ {
376
+ "epoch": 0.978004133451432,
377
+ "grad_norm": 2.545665979385376,
378
+ "learning_rate": 3.36999311091428e-05,
379
+ "loss": 0.3006,
380
+ "step": 26500
381
  },
382
  {
383
  "epoch": 0.9964570416297609,
384
+ "grad_norm": 3.353132486343384,
385
  "learning_rate": 3.3392382639503986e-05,
386
+ "loss": 0.2949,
387
+ "step": 27000
388
  },
389
  {
390
  "epoch": 1.0,
391
+ "eval_loss": 0.2873324751853943,
392
+ "eval_mse": 0.28733249980634695,
393
+ "eval_runtime": 59.3439,
394
+ "eval_samples_per_second": 1623.385,
395
+ "eval_steps_per_second": 202.936,
396
+ "step": 27096
397
+ },
398
+ {
399
+ "epoch": 1.0149099498080898,
400
+ "grad_norm": 1.8382848501205444,
401
+ "learning_rate": 3.308483416986517e-05,
402
+ "loss": 0.2532,
403
+ "step": 27500
404
  },
405
  {
406
  "epoch": 1.0333628579864187,
407
+ "grad_norm": 2.252268075942993,
408
  "learning_rate": 3.277728570022636e-05,
409
+ "loss": 0.2366,
410
+ "step": 28000
411
+ },
412
+ {
413
+ "epoch": 1.0518157661647476,
414
+ "grad_norm": 2.4993882179260254,
415
+ "learning_rate": 3.246973723058754e-05,
416
+ "loss": 0.2431,
417
+ "step": 28500
418
  },
419
  {
420
  "epoch": 1.0702686743430765,
421
+ "grad_norm": 2.5144221782684326,
422
  "learning_rate": 3.216218876094873e-05,
423
+ "loss": 0.2421,
424
+ "step": 29000
425
+ },
426
+ {
427
+ "epoch": 1.0887215825214054,
428
+ "grad_norm": 2.1316707134246826,
429
+ "learning_rate": 3.185464029130991e-05,
430
+ "loss": 0.2401,
431
+ "step": 29500
432
  },
433
  {
434
  "epoch": 1.1071744906997343,
435
+ "grad_norm": 2.0435242652893066,
436
  "learning_rate": 3.15470918216711e-05,
437
+ "loss": 0.2339,
438
+ "step": 30000
439
+ },
440
+ {
441
+ "epoch": 1.1256273988780632,
442
+ "grad_norm": 3.039565086364746,
443
+ "learning_rate": 3.123954335203228e-05,
444
+ "loss": 0.2424,
445
+ "step": 30500
446
  },
447
  {
448
  "epoch": 1.144080307056392,
449
+ "grad_norm": 2.4302680492401123,
450
  "learning_rate": 3.093199488239347e-05,
451
+ "loss": 0.239,
452
+ "step": 31000
453
+ },
454
+ {
455
+ "epoch": 1.162533215234721,
456
+ "grad_norm": 2.2677814960479736,
457
+ "learning_rate": 3.062444641275465e-05,
458
+ "loss": 0.2394,
459
+ "step": 31500
460
  },
461
  {
462
  "epoch": 1.1809861234130499,
463
+ "grad_norm": 3.0901999473571777,
464
  "learning_rate": 3.0316897943115834e-05,
465
+ "loss": 0.2323,
466
+ "step": 32000
467
+ },
468
+ {
469
+ "epoch": 1.1994390315913788,
470
+ "grad_norm": 2.9839725494384766,
471
+ "learning_rate": 3.0009349473477023e-05,
472
+ "loss": 0.2387,
473
+ "step": 32500
474
  },
475
  {
476
  "epoch": 1.2178919397697077,
477
+ "grad_norm": 2.2191145420074463,
478
  "learning_rate": 2.9701801003838208e-05,
479
+ "loss": 0.2388,
480
+ "step": 33000
481
+ },
482
+ {
483
+ "epoch": 1.2363448479480366,
484
+ "grad_norm": 2.5112791061401367,
485
+ "learning_rate": 2.939425253419939e-05,
486
+ "loss": 0.2378,
487
+ "step": 33500
488
  },
489
  {
490
  "epoch": 1.2547977561263655,
491
+ "grad_norm": 2.370722770690918,
492
  "learning_rate": 2.9086704064560578e-05,
493
+ "loss": 0.2414,
494
+ "step": 34000
495
+ },
496
+ {
497
+ "epoch": 1.2732506643046944,
498
+ "grad_norm": 3.031116247177124,
499
+ "learning_rate": 2.877915559492176e-05,
500
+ "loss": 0.2349,
501
+ "step": 34500
502
  },
503
  {
504
  "epoch": 1.2917035724830233,
505
+ "grad_norm": 3.356889247894287,
506
  "learning_rate": 2.8471607125282945e-05,
507
+ "loss": 0.2364,
508
+ "step": 35000
509
+ },
510
+ {
511
+ "epoch": 1.3101564806613522,
512
+ "grad_norm": 3.58608078956604,
513
+ "learning_rate": 2.8164058655644134e-05,
514
+ "loss": 0.2435,
515
+ "step": 35500
516
  },
517
  {
518
  "epoch": 1.328609388839681,
519
+ "grad_norm": 3.02878999710083,
520
  "learning_rate": 2.7856510186005312e-05,
521
+ "loss": 0.2419,
522
+ "step": 36000
523
+ },
524
+ {
525
+ "epoch": 1.34706229701801,
526
+ "grad_norm": 2.696791648864746,
527
+ "learning_rate": 2.75489617163665e-05,
528
+ "loss": 0.2335,
529
+ "step": 36500
530
  },
531
  {
532
  "epoch": 1.3655152051963388,
533
+ "grad_norm": 3.62918758392334,
534
  "learning_rate": 2.7241413246727686e-05,
535
+ "loss": 0.2335,
536
+ "step": 37000
537
+ },
538
+ {
539
+ "epoch": 1.3839681133746677,
540
+ "grad_norm": 2.1558516025543213,
541
+ "learning_rate": 2.6933864777088867e-05,
542
+ "loss": 0.2312,
543
+ "step": 37500
544
  },
545
  {
546
  "epoch": 1.4024210215529966,
547
+ "grad_norm": 2.8393824100494385,
548
  "learning_rate": 2.6626316307450056e-05,
549
+ "loss": 0.2366,
550
+ "step": 38000
551
+ },
552
+ {
553
+ "epoch": 1.4208739297313255,
554
+ "grad_norm": 4.011019229888916,
555
+ "learning_rate": 2.631876783781124e-05,
556
+ "loss": 0.2341,
557
+ "step": 38500
558
  },
559
  {
560
  "epoch": 1.4393268379096544,
561
+ "grad_norm": 2.1811978816986084,
562
  "learning_rate": 2.6011219368172423e-05,
563
+ "loss": 0.2367,
564
+ "step": 39000
565
+ },
566
+ {
567
+ "epoch": 1.4577797460879833,
568
+ "grad_norm": 2.3741202354431152,
569
+ "learning_rate": 2.570367089853361e-05,
570
+ "loss": 0.2357,
571
+ "step": 39500
572
  },
573
  {
574
  "epoch": 1.4762326542663124,
575
+ "grad_norm": 1.7879256010055542,
576
  "learning_rate": 2.5396122428894797e-05,
577
+ "loss": 0.2383,
578
+ "step": 40000
579
+ },
580
+ {
581
+ "epoch": 1.4946855624446413,
582
+ "grad_norm": 2.9185972213745117,
583
+ "learning_rate": 2.508857395925598e-05,
584
+ "loss": 0.2383,
585
+ "step": 40500
586
  },
587
  {
588
  "epoch": 1.51313847062297,
589
+ "grad_norm": 2.2389113903045654,
590
  "learning_rate": 2.4781025489617167e-05,
591
+ "loss": 0.2365,
592
+ "step": 41000
593
+ },
594
+ {
595
+ "epoch": 1.531591378801299,
596
+ "grad_norm": 3.312300205230713,
597
+ "learning_rate": 2.447347701997835e-05,
598
+ "loss": 0.235,
599
+ "step": 41500
600
  },
601
  {
602
  "epoch": 1.550044286979628,
603
+ "grad_norm": 1.933483362197876,
604
  "learning_rate": 2.4165928550339534e-05,
605
+ "loss": 0.2328,
606
+ "step": 42000
607
+ },
608
+ {
609
+ "epoch": 1.568497195157957,
610
+ "grad_norm": 3.212892532348633,
611
+ "learning_rate": 2.3858380080700722e-05,
612
+ "loss": 0.2298,
613
+ "step": 42500
614
  },
615
  {
616
  "epoch": 1.5869501033362858,
617
+ "grad_norm": 2.561583995819092,
618
  "learning_rate": 2.3550831611061904e-05,
619
+ "loss": 0.2334,
620
+ "step": 43000
621
+ },
622
+ {
623
+ "epoch": 1.6054030115146147,
624
+ "grad_norm": 3.2696096897125244,
625
+ "learning_rate": 2.324328314142309e-05,
626
+ "loss": 0.239,
627
+ "step": 43500
628
  },
629
  {
630
  "epoch": 1.6238559196929436,
631
+ "grad_norm": 2.1827101707458496,
632
  "learning_rate": 2.2935734671784274e-05,
633
+ "loss": 0.2305,
634
+ "step": 44000
635
+ },
636
+ {
637
+ "epoch": 1.6423088278712725,
638
+ "grad_norm": 2.3663787841796875,
639
+ "learning_rate": 2.262818620214546e-05,
640
+ "loss": 0.2282,
641
+ "step": 44500
642
  },
643
  {
644
  "epoch": 1.6607617360496014,
645
+ "grad_norm": 2.3439533710479736,
646
  "learning_rate": 2.2320637732506645e-05,
647
+ "loss": 0.2319,
648
+ "step": 45000
649
+ },
650
+ {
651
+ "epoch": 1.6792146442279303,
652
+ "grad_norm": 1.9831467866897583,
653
+ "learning_rate": 2.201308926286783e-05,
654
+ "loss": 0.2301,
655
+ "step": 45500
656
  },
657
  {
658
  "epoch": 1.6976675524062592,
659
+ "grad_norm": 2.6936724185943604,
660
  "learning_rate": 2.1705540793229015e-05,
661
+ "loss": 0.229,
662
+ "step": 46000
663
+ },
664
+ {
665
+ "epoch": 1.7161204605845881,
666
+ "grad_norm": 2.882948160171509,
667
+ "learning_rate": 2.13979923235902e-05,
668
+ "loss": 0.2312,
669
+ "step": 46500
670
  },
671
  {
672
  "epoch": 1.734573368762917,
673
+ "grad_norm": 2.604907274246216,
674
  "learning_rate": 2.1090443853951382e-05,
675
+ "loss": 0.2251,
676
+ "step": 47000
677
+ },
678
+ {
679
+ "epoch": 1.753026276941246,
680
+ "grad_norm": 2.0118274688720703,
681
+ "learning_rate": 2.0782895384312567e-05,
682
+ "loss": 0.2298,
683
+ "step": 47500
684
  },
685
  {
686
  "epoch": 1.7714791851195748,
687
+ "grad_norm": 3.394923448562622,
688
  "learning_rate": 2.0475346914673755e-05,
689
+ "loss": 0.235,
690
+ "step": 48000
691
+ },
692
+ {
693
+ "epoch": 1.7899320932979037,
694
+ "grad_norm": 2.554750919342041,
695
+ "learning_rate": 2.0167798445034937e-05,
696
+ "loss": 0.2336,
697
+ "step": 48500
698
  },
699
  {
700
  "epoch": 1.8083850014762326,
701
+ "grad_norm": 2.264559030532837,
702
  "learning_rate": 1.9860249975396122e-05,
703
+ "loss": 0.2252,
704
+ "step": 49000
705
+ },
706
+ {
707
+ "epoch": 1.8268379096545617,
708
+ "grad_norm": 3.1084542274475098,
709
+ "learning_rate": 1.955270150575731e-05,
710
+ "loss": 0.228,
711
+ "step": 49500
712
  },
713
  {
714
  "epoch": 1.8452908178328906,
715
+ "grad_norm": 3.497529983520508,
716
  "learning_rate": 1.9245153036118493e-05,
717
+ "loss": 0.2262,
718
+ "step": 50000
719
+ },
720
+ {
721
+ "epoch": 1.8637437260112195,
722
+ "grad_norm": 2.686687707901001,
723
+ "learning_rate": 1.8937604566479678e-05,
724
+ "loss": 0.2238,
725
+ "step": 50500
726
  },
727
  {
728
  "epoch": 1.8821966341895484,
729
+ "grad_norm": 3.541355609893799,
730
  "learning_rate": 1.8630056096840863e-05,
731
+ "loss": 0.2273,
732
+ "step": 51000
733
+ },
734
+ {
735
+ "epoch": 1.9006495423678773,
736
+ "grad_norm": 2.5415749549865723,
737
+ "learning_rate": 1.8322507627202048e-05,
738
+ "loss": 0.2298,
739
+ "step": 51500
740
  },
741
  {
742
  "epoch": 1.9191024505462062,
743
+ "grad_norm": 2.017357349395752,
744
  "learning_rate": 1.8014959157563233e-05,
745
+ "loss": 0.2285,
746
+ "step": 52000
747
+ },
748
+ {
749
+ "epoch": 1.937555358724535,
750
+ "grad_norm": 2.1992979049682617,
751
+ "learning_rate": 1.7707410687924418e-05,
752
+ "loss": 0.2322,
753
+ "step": 52500
754
  },
755
  {
756
  "epoch": 1.956008266902864,
757
+ "grad_norm": 3.5973832607269287,
758
  "learning_rate": 1.7399862218285603e-05,
759
+ "loss": 0.2282,
760
+ "step": 53000
761
+ },
762
+ {
763
+ "epoch": 1.974461175081193,
764
+ "grad_norm": 2.55898380279541,
765
+ "learning_rate": 1.709231374864679e-05,
766
+ "loss": 0.2257,
767
+ "step": 53500
768
  },
769
  {
770
  "epoch": 1.9929140832595218,
771
+ "grad_norm": 1.7618777751922607,
772
  "learning_rate": 1.678476527900797e-05,
773
+ "loss": 0.2239,
774
+ "step": 54000
775
  },
776
  {
777
  "epoch": 2.0,
778
+ "eval_loss": 0.2671372890472412,
779
+ "eval_mse": 0.2671373049978385,
780
+ "eval_runtime": 53.3061,
781
+ "eval_samples_per_second": 1807.261,
782
+ "eval_steps_per_second": 225.922,
783
+ "step": 54192
784
+ },
785
+ {
786
+ "epoch": 2.0113669914378507,
787
+ "grad_norm": 1.9996424913406372,
788
+ "learning_rate": 1.647721680936916e-05,
789
+ "loss": 0.2054,
790
+ "step": 54500
791
  },
792
  {
793
  "epoch": 2.0298198996161796,
794
+ "grad_norm": 3.9520423412323,
795
  "learning_rate": 1.6169668339730344e-05,
796
+ "loss": 0.1834,
797
+ "step": 55000
798
+ },
799
+ {
800
+ "epoch": 2.0482728077945085,
801
+ "grad_norm": 2.5493485927581787,
802
+ "learning_rate": 1.5862119870091526e-05,
803
+ "loss": 0.183,
804
+ "step": 55500
805
  },
806
  {
807
  "epoch": 2.0667257159728374,
808
+ "grad_norm": 2.3871965408325195,
809
  "learning_rate": 1.555457140045271e-05,
810
+ "loss": 0.182,
811
+ "step": 56000
812
+ },
813
+ {
814
+ "epoch": 2.0851786241511663,
815
+ "grad_norm": 2.607405185699463,
816
+ "learning_rate": 1.5247022930813898e-05,
817
+ "loss": 0.1831,
818
+ "step": 56500
819
  },
820
  {
821
  "epoch": 2.103631532329495,
822
+ "grad_norm": 3.14758038520813,
823
  "learning_rate": 1.4939474461175081e-05,
824
+ "loss": 0.1802,
825
+ "step": 57000
826
+ },
827
+ {
828
+ "epoch": 2.122084440507824,
829
+ "grad_norm": 2.3298392295837402,
830
+ "learning_rate": 1.4631925991536266e-05,
831
+ "loss": 0.1886,
832
+ "step": 57500
833
  },
834
  {
835
  "epoch": 2.140537348686153,
836
+ "grad_norm": 2.356092929840088,
837
  "learning_rate": 1.4324377521897453e-05,
838
+ "loss": 0.1866,
839
+ "step": 58000
840
+ },
841
+ {
842
+ "epoch": 2.158990256864482,
843
+ "grad_norm": 2.599311113357544,
844
+ "learning_rate": 1.4016829052258637e-05,
845
+ "loss": 0.1799,
846
+ "step": 58500
847
  },
848
  {
849
  "epoch": 2.1774431650428108,
850
+ "grad_norm": 2.818366050720215,
851
  "learning_rate": 1.3709280582619822e-05,
852
+ "loss": 0.1885,
853
+ "step": 59000
854
+ },
855
+ {
856
+ "epoch": 2.1958960732211397,
857
+ "grad_norm": 2.1913158893585205,
858
+ "learning_rate": 1.3401732112981005e-05,
859
+ "loss": 0.1817,
860
+ "step": 59500
861
  },
862
  {
863
  "epoch": 2.2143489813994686,
864
+ "grad_norm": 1.8911914825439453,
865
  "learning_rate": 1.3094183643342192e-05,
866
+ "loss": 0.1837,
867
+ "step": 60000
868
+ },
869
+ {
870
+ "epoch": 2.2328018895777975,
871
+ "grad_norm": 1.7305101156234741,
872
+ "learning_rate": 1.2786635173703375e-05,
873
+ "loss": 0.18,
874
+ "step": 60500
875
  },
876
  {
877
  "epoch": 2.2512547977561264,
878
+ "grad_norm": 2.5203235149383545,
879
  "learning_rate": 1.2479086704064562e-05,
880
+ "loss": 0.1874,
881
+ "step": 61000
882
+ },
883
+ {
884
+ "epoch": 2.2697077059344553,
885
+ "grad_norm": 2.4888010025024414,
886
+ "learning_rate": 1.2171538234425746e-05,
887
+ "loss": 0.1834,
888
+ "step": 61500
889
  },
890
  {
891
  "epoch": 2.288160614112784,
892
+ "grad_norm": 2.570237159729004,
893
  "learning_rate": 1.186398976478693e-05,
894
+ "loss": 0.1845,
895
+ "step": 62000
896
+ },
897
+ {
898
+ "epoch": 2.306613522291113,
899
+ "grad_norm": 3.0238893032073975,
900
+ "learning_rate": 1.1556441295148116e-05,
901
+ "loss": 0.1862,
902
+ "step": 62500
903
  },
904
  {
905
  "epoch": 2.325066430469442,
906
+ "grad_norm": 2.6648924350738525,
907
  "learning_rate": 1.1248892825509301e-05,
908
+ "loss": 0.1858,
909
+ "step": 63000
910
+ },
911
+ {
912
+ "epoch": 2.343519338647771,
913
+ "grad_norm": 2.760099411010742,
914
+ "learning_rate": 1.0941344355870485e-05,
915
+ "loss": 0.1836,
916
+ "step": 63500
917
  },
918
  {
919
  "epoch": 2.3619722468260997,
920
+ "grad_norm": 2.4213175773620605,
921
  "learning_rate": 1.0633795886231671e-05,
922
+ "loss": 0.1798,
923
+ "step": 64000
924
+ },
925
+ {
926
+ "epoch": 2.3804251550044286,
927
+ "grad_norm": 3.6033475399017334,
928
+ "learning_rate": 1.0326247416592857e-05,
929
+ "loss": 0.1811,
930
+ "step": 64500
931
  },
932
  {
933
  "epoch": 2.3988780631827575,
934
+ "grad_norm": 2.590794086456299,
935
  "learning_rate": 1.001869894695404e-05,
936
+ "loss": 0.1784,
937
+ "step": 65000
938
+ },
939
+ {
940
+ "epoch": 2.4173309713610864,
941
+ "grad_norm": 2.297030448913574,
942
+ "learning_rate": 9.711150477315225e-06,
943
+ "loss": 0.177,
944
+ "step": 65500
945
  },
946
  {
947
  "epoch": 2.4357838795394153,
948
+ "grad_norm": 2.6488535404205322,
949
  "learning_rate": 9.40360200767641e-06,
950
+ "loss": 0.1804,
951
+ "step": 66000
952
+ },
953
+ {
954
+ "epoch": 2.4542367877177442,
955
+ "grad_norm": 2.314232349395752,
956
+ "learning_rate": 9.096053538037595e-06,
957
+ "loss": 0.1808,
958
+ "step": 66500
959
  },
960
  {
961
  "epoch": 2.472689695896073,
962
+ "grad_norm": 2.5361621379852295,
963
  "learning_rate": 8.78850506839878e-06,
964
+ "loss": 0.1814,
965
+ "step": 67000
966
+ },
967
+ {
968
+ "epoch": 2.491142604074402,
969
+ "grad_norm": 1.9473010301589966,
970
+ "learning_rate": 8.480956598759966e-06,
971
+ "loss": 0.1819,
972
+ "step": 67500
973
  },
974
  {
975
  "epoch": 2.509595512252731,
976
+ "grad_norm": 1.6078424453735352,
977
  "learning_rate": 8.17340812912115e-06,
978
+ "loss": 0.1821,
979
+ "step": 68000
980
+ },
981
+ {
982
+ "epoch": 2.52804842043106,
983
+ "grad_norm": 2.9577689170837402,
984
+ "learning_rate": 7.865859659482334e-06,
985
+ "loss": 0.1816,
986
+ "step": 68500
987
  },
988
  {
989
  "epoch": 2.5465013286093887,
990
+ "grad_norm": 2.314099073410034,
991
  "learning_rate": 7.55831118984352e-06,
992
+ "loss": 0.1813,
993
+ "step": 69000
994
+ },
995
+ {
996
+ "epoch": 2.5649542367877176,
997
+ "grad_norm": 2.1542553901672363,
998
+ "learning_rate": 7.250762720204704e-06,
999
+ "loss": 0.1771,
1000
+ "step": 69500
1001
  },
1002
  {
1003
  "epoch": 2.5834071449660465,
1004
+ "grad_norm": 2.126908540725708,
1005
  "learning_rate": 6.94321425056589e-06,
1006
+ "loss": 0.1807,
1007
+ "step": 70000
1008
+ },
1009
+ {
1010
+ "epoch": 2.6018600531443754,
1011
+ "grad_norm": 2.9939024448394775,
1012
+ "learning_rate": 6.635665780927075e-06,
1013
+ "loss": 0.1792,
1014
+ "step": 70500
1015
  },
1016
  {
1017
  "epoch": 2.6203129613227043,
1018
+ "grad_norm": 3.436375856399536,
1019
  "learning_rate": 6.328117311288259e-06,
1020
+ "loss": 0.1801,
1021
+ "step": 71000
1022
+ },
1023
+ {
1024
+ "epoch": 2.638765869501033,
1025
+ "grad_norm": 2.515270233154297,
1026
+ "learning_rate": 6.020568841649444e-06,
1027
+ "loss": 0.1805,
1028
+ "step": 71500
1029
  },
1030
  {
1031
  "epoch": 2.657218777679362,
1032
+ "grad_norm": 2.924694061279297,
1033
  "learning_rate": 5.713020372010629e-06,
1034
+ "loss": 0.1782,
1035
+ "step": 72000
1036
+ },
1037
+ {
1038
+ "epoch": 2.675671685857691,
1039
+ "grad_norm": 2.6601603031158447,
1040
+ "learning_rate": 5.4054719023718145e-06,
1041
  "loss": 0.1821,
1042
+ "step": 72500
1043
  },
1044
  {
1045
  "epoch": 2.69412459403602,
1046
+ "grad_norm": 2.6807141304016113,
1047
  "learning_rate": 5.097923432732999e-06,
1048
+ "loss": 0.1793,
1049
+ "step": 73000
1050
+ },
1051
+ {
1052
+ "epoch": 2.712577502214349,
1053
+ "grad_norm": 3.814213991165161,
1054
+ "learning_rate": 4.790374963094184e-06,
1055
+ "loss": 0.1814,
1056
+ "step": 73500
1057
  },
1058
  {
1059
  "epoch": 2.7310304103926777,
1060
+ "grad_norm": 2.8295676708221436,
1061
  "learning_rate": 4.482826493455368e-06,
1062
+ "loss": 0.1761,
1063
+ "step": 74000
1064
+ },
1065
+ {
1066
+ "epoch": 2.7494833185710066,
1067
+ "grad_norm": 3.3659756183624268,
1068
+ "learning_rate": 4.175278023816553e-06,
1069
+ "loss": 0.1823,
1070
+ "step": 74500
1071
  },
1072
  {
1073
  "epoch": 2.7679362267493355,
1074
+ "grad_norm": 2.6024508476257324,
1075
  "learning_rate": 3.8677295541777385e-06,
1076
+ "loss": 0.1805,
1077
+ "step": 75000
1078
+ },
1079
+ {
1080
+ "epoch": 2.7863891349276644,
1081
+ "grad_norm": 2.8737952709198,
1082
+ "learning_rate": 3.5601810845389237e-06,
1083
+ "loss": 0.173,
1084
+ "step": 75500
1085
  },
1086
  {
1087
  "epoch": 2.8048420431059933,
1088
+ "grad_norm": 2.042231559753418,
1089
  "learning_rate": 3.2526326149001084e-06,
1090
+ "loss": 0.1748,
1091
+ "step": 76000
1092
+ },
1093
+ {
1094
+ "epoch": 2.823294951284322,
1095
+ "grad_norm": 2.5914924144744873,
1096
+ "learning_rate": 2.945084145261293e-06,
1097
+ "loss": 0.1796,
1098
+ "step": 76500
1099
  },
1100
  {
1101
  "epoch": 2.841747859462651,
1102
+ "grad_norm": 2.8601739406585693,
1103
  "learning_rate": 2.6375356756224782e-06,
1104
+ "loss": 0.178,
1105
+ "step": 77000
1106
+ },
1107
+ {
1108
+ "epoch": 2.86020076764098,
1109
+ "grad_norm": 2.323885917663574,
1110
+ "learning_rate": 2.3299872059836634e-06,
1111
+ "loss": 0.179,
1112
+ "step": 77500
1113
  },
1114
  {
1115
  "epoch": 2.878653675819309,
1116
+ "grad_norm": 3.8116016387939453,
1117
  "learning_rate": 2.022438736344848e-06,
1118
+ "loss": 0.1793,
1119
+ "step": 78000
1120
+ },
1121
+ {
1122
+ "epoch": 2.8971065839976378,
1123
+ "grad_norm": 3.1566436290740967,
1124
+ "learning_rate": 1.7148902667060328e-06,
1125
+ "loss": 0.1733,
1126
+ "step": 78500
1127
  },
1128
  {
1129
  "epoch": 2.9155594921759667,
1130
+ "grad_norm": 2.2577366828918457,
1131
  "learning_rate": 1.4073417970672177e-06,
1132
+ "loss": 0.1757,
1133
+ "step": 79000
1134
+ },
1135
+ {
1136
+ "epoch": 2.934012400354296,
1137
+ "grad_norm": 2.2155442237854004,
1138
+ "learning_rate": 1.0997933274284029e-06,
1139
+ "loss": 0.1805,
1140
+ "step": 79500
1141
  },
1142
  {
1143
  "epoch": 2.952465308532625,
1144
+ "grad_norm": 1.9121520519256592,
1145
  "learning_rate": 7.922448577895876e-07,
1146
+ "loss": 0.1775,
1147
+ "step": 80000
1148
+ },
1149
+ {
1150
+ "epoch": 2.970918216710954,
1151
+ "grad_norm": 3.8823773860931396,
1152
+ "learning_rate": 4.846963881507725e-07,
1153
+ "loss": 0.1808,
1154
+ "step": 80500
1155
  },
1156
  {
1157
  "epoch": 2.9893711248892827,
1158
+ "grad_norm": 2.4405837059020996,
1159
  "learning_rate": 1.771479185119575e-07,
1160
+ "loss": 0.1789,
1161
+ "step": 81000
1162
  },
1163
  {
1164
  "epoch": 3.0,
1165
+ "eval_loss": 0.26044028997421265,
1166
+ "eval_mse": 0.2604402849126996,
1167
+ "eval_runtime": 52.9352,
1168
+ "eval_samples_per_second": 1819.924,
1169
+ "eval_steps_per_second": 227.505,
1170
+ "step": 81288
1171
  },
1172
  {
1173
  "epoch": 3.0,
1174
+ "step": 81288,
1175
  "total_flos": 4.283504864539085e+16,
1176
+ "train_loss": 0.24568356713046358,
1177
+ "train_runtime": 4418.2249,
1178
+ "train_samples_per_second": 588.726,
1179
+ "train_steps_per_second": 18.398
1180
  }
1181
  ],
1182
  "logging_steps": 500,
1183
+ "max_steps": 81288,
1184
  "num_input_tokens_seen": 0,
1185
  "num_train_epochs": 3,
1186
  "save_steps": 500,
 
1197
  }
1198
  },
1199
  "total_flos": 4.283504864539085e+16,
1200
+ "train_batch_size": 32,
1201
  "trial_name": null,
1202
  "trial_params": null
1203
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd2d47e7c406fcaf5be371ef202fba62dea81038520d7b2858a9e43c9ed99caa
+ oid sha256:7fb8dd27419a9946829959509b851f42cf22c307349e98461ea22e7782f4ee83
  size 5368