dimasik1987 committed
Commit 33fb5fc
Parent(s): 76d84e8
Training in progress, step 50, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:698ab20718cc10d9d6dd500e26c1af255955ecaaf7cd618b737f1f5c2f51ea4b
 size 167832240
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5d67ab9c0de778ee91cb6782ba25854e65777cef0371c9e776ac393db195635e
 size 335922386
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d1aefd6b5cb7ab5293be3b57c5a81d7e2a62b44d9e7b6cc213897ed1dd28dec5
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
 size 1064
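Each file above is tracked with Git LFS, so the commit only rewrites the three-line pointer (spec version, oid sha256, byte size); the binary checkpoint blobs themselves live in LFS storage. As a minimal sketch (not part of this repo), assuming the checkpoint directory has been downloaded locally as last-checkpoint/, a pointer can be checked against the downloaded file by recomputing its SHA-256 and size; the expected values below are copied from the adapter_model.safetensors diff above.

```python
import hashlib
from pathlib import Path


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file's SHA-256 digest and size with the
    values recorded in its Git LFS pointer."""
    data = Path(path).read_bytes()
    return (
        hashlib.sha256(data).hexdigest() == expected_oid
        and len(data) == expected_size
    )


# Expected oid/size taken from the updated pointer shown above.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "698ab20718cc10d9d6dd500e26c1af255955ecaaf7cd618b737f1f5c2f51ea4b",
    167832240,
))
```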
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.002591680704937152,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -182,6 +182,181 @@
       "learning_rate": 0.00010654031292301432,
       "loss": 2.3092,
       "step": 25
+    },
+    {
+      "epoch": 0.001347673966567319,
+      "grad_norm": 1.8183993101119995,
+      "learning_rate": 0.0001,
+      "loss": 2.2552,
+      "step": 26
+    },
+    {
+      "epoch": 0.001399507580666062,
+      "grad_norm": 1.5538502931594849,
+      "learning_rate": 9.345968707698569e-05,
+      "loss": 2.3462,
+      "step": 27
+    },
+    {
+      "epoch": 0.001451341194764805,
+      "grad_norm": 1.5595468282699585,
+      "learning_rate": 8.694738077799488e-05,
+      "loss": 2.3715,
+      "step": 28
+    },
+    {
+      "epoch": 0.001503174808863548,
+      "grad_norm": 2.1476974487304688,
+      "learning_rate": 8.049096779838719e-05,
+      "loss": 2.293,
+      "step": 29
+    },
+    {
+      "epoch": 0.0015550084229622911,
+      "grad_norm": 1.9469425678253174,
+      "learning_rate": 7.411809548974792e-05,
+      "loss": 2.3133,
+      "step": 30
+    },
+    {
+      "epoch": 0.0016068420370610342,
+      "grad_norm": 1.6128257513046265,
+      "learning_rate": 6.785605346968386e-05,
+      "loss": 2.2834,
+      "step": 31
+    },
+    {
+      "epoch": 0.0016586756511597772,
+      "grad_norm": 1.5214881896972656,
+      "learning_rate": 6.173165676349103e-05,
+      "loss": 2.5029,
+      "step": 32
+    },
+    {
+      "epoch": 0.0017105092652585203,
+      "grad_norm": 1.5626273155212402,
+      "learning_rate": 5.577113097809989e-05,
+      "loss": 2.4548,
+      "step": 33
+    },
+    {
+      "epoch": 0.001762342879357263,
+      "grad_norm": 1.8093630075454712,
+      "learning_rate": 5.000000000000002e-05,
+      "loss": 2.5801,
+      "step": 34
+    },
+    {
+      "epoch": 0.0018141764934560061,
+      "grad_norm": 1.7555335760116577,
+      "learning_rate": 4.444297669803981e-05,
+      "loss": 2.3909,
+      "step": 35
+    },
+    {
+      "epoch": 0.0018660101075547492,
+      "grad_norm": 1.7623857259750366,
+      "learning_rate": 3.9123857099127936e-05,
+      "loss": 2.3654,
+      "step": 36
+    },
+    {
+      "epoch": 0.0019178437216534922,
+      "grad_norm": 1.8313246965408325,
+      "learning_rate": 3.406541848999312e-05,
+      "loss": 2.053,
+      "step": 37
+    },
+    {
+      "epoch": 0.0019696773357522354,
+      "grad_norm": 1.8312631845474243,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 2.2619,
+      "step": 38
+    },
+    {
+      "epoch": 0.0020215109498509785,
+      "grad_norm": 1.3328640460968018,
+      "learning_rate": 2.4816019252102273e-05,
+      "loss": 2.2884,
+      "step": 39
+    },
+    {
+      "epoch": 0.0020733445639497215,
+      "grad_norm": 1.395845890045166,
+      "learning_rate": 2.0664665970876496e-05,
+      "loss": 2.1892,
+      "step": 40
+    },
+    {
+      "epoch": 0.0021251781780484646,
+      "grad_norm": 1.6217460632324219,
+      "learning_rate": 1.6853038769745467e-05,
+      "loss": 2.4114,
+      "step": 41
+    },
+    {
+      "epoch": 0.0021770117921472076,
+      "grad_norm": 1.482170581817627,
+      "learning_rate": 1.339745962155613e-05,
+      "loss": 2.3014,
+      "step": 42
+    },
+    {
+      "epoch": 0.0022288454062459506,
+      "grad_norm": 1.5318725109100342,
+      "learning_rate": 1.0312725846731175e-05,
+      "loss": 2.3725,
+      "step": 43
+    },
+    {
+      "epoch": 0.0022806790203446937,
+      "grad_norm": 1.729251742362976,
+      "learning_rate": 7.612046748871327e-06,
+      "loss": 2.306,
+      "step": 44
+    },
+    {
+      "epoch": 0.0023325126344434367,
+      "grad_norm": 1.675718903541565,
+      "learning_rate": 5.306987050489442e-06,
+      "loss": 2.3207,
+      "step": 45
+    },
+    {
+      "epoch": 0.0023843462485421798,
+      "grad_norm": 1.5841999053955078,
+      "learning_rate": 3.40741737109318e-06,
+      "loss": 2.1703,
+      "step": 46
+    },
+    {
+      "epoch": 0.002436179862640923,
+      "grad_norm": 1.4216182231903076,
+      "learning_rate": 1.921471959676957e-06,
+      "loss": 2.2403,
+      "step": 47
+    },
+    {
+      "epoch": 0.002488013476739666,
+      "grad_norm": 1.5462912321090698,
+      "learning_rate": 8.555138626189618e-07,
+      "loss": 2.3091,
+      "step": 48
+    },
+    {
+      "epoch": 0.002539847090838409,
+      "grad_norm": 1.3521757125854492,
+      "learning_rate": 2.141076761396521e-07,
+      "loss": 2.4685,
+      "step": 49
+    },
+    {
+      "epoch": 0.002591680704937152,
+      "grad_norm": 1.295125961303711,
+      "learning_rate": 0.0,
+      "loss": 2.1998,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -196,12 +371,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 7.41887283560448e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
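The trainer_state.json update adds one log_history entry per optimizer step (logging_steps is 1) and closes the run at global_step 50 with should_training_stop set to true. As a minimal sketch for inspecting that state, assuming the checkpoint directory is available locally as last-checkpoint/, the loss curve and learning-rate schedule can be read back like this:

```python
import json
from pathlib import Path

# Load the trainer state saved with the step-50 checkpoint.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("global_step:", state["global_step"])  # 50
print("epoch:", state["epoch"])              # ~0.00259

# Each log_history entry records the step, training loss,
# learning rate and gradient norm at that step.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
              f"lr {entry['learning_rate']:.2e}")
```

If the run were resumed rather than stopped, transformers' Trainer.train(resume_from_checkpoint="last-checkpoint") would restore the optimizer, scheduler and RNG state from the other files in this commit; here should_training_stop is already true, so step 50 marks the end of the scheduled run.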