Training in progress, step 2404, checkpoint
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:92e58e8695630ff5ccb135b4333064504c3cd8bd2865e2fc6fd39f128ab7e34e
 size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b52225a14b00d71733d4f2de52564812db584c9351e5759268e2690175489248
 size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a2ad5e4b3f643cc8277d6ec1988fa3ef711691975814d2464a77f58b0a264633
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:119e87fda9d6cbceb29a4ec44787fae3f1908a40119a5846fa63f99a34ad8544
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.23246362805366516,
   "best_model_checkpoint": "./fine-tuned/checkpoint-2000",
-  "epoch": 3.
+  "epoch": 3.99667497921862,
   "eval_steps": 100,
-  "global_step":
+  "global_step": 2404,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -25,9 +25,9 @@
     {
       "epoch": 0.1662510390689942,
       "eval_loss": 0.35283052921295166,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.3275,
+      "eval_samples_per_second": 13.598,
+      "eval_steps_per_second": 1.707,
       "step": 100
     },
     {
@@ -47,9 +47,9 @@
     {
       "epoch": 0.3325020781379884,
       "eval_loss": 0.30865946412086487,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5834,
+      "eval_samples_per_second": 13.503,
+      "eval_steps_per_second": 1.695,
       "step": 200
     },
     {
@@ -69,9 +69,9 @@
     {
       "epoch": 0.49875311720698257,
       "eval_loss": 0.2891499996185303,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.4356,
+      "eval_samples_per_second": 13.558,
+      "eval_steps_per_second": 1.702,
       "step": 300
     },
     {
@@ -91,9 +91,9 @@
     {
       "epoch": 0.6650041562759768,
       "eval_loss": 0.2757515609264374,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.561,
+      "eval_samples_per_second": 13.512,
+      "eval_steps_per_second": 1.696,
       "step": 400
     },
     {
@@ -113,9 +113,9 @@
     {
       "epoch": 0.8312551953449709,
       "eval_loss": 0.2673098146915436,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.4611,
+      "eval_samples_per_second": 13.549,
+      "eval_steps_per_second": 1.7,
       "step": 500
     },
     {
@@ -135,9 +135,9 @@
     {
       "epoch": 0.9975062344139651,
       "eval_loss": 0.26070085167884827,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5977,
+      "eval_samples_per_second": 13.498,
+      "eval_steps_per_second": 1.694,
       "step": 600
     },
     {
@@ -157,9 +157,9 @@
     {
       "epoch": 1.1637572734829593,
       "eval_loss": 0.256939560174942,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5571,
+      "eval_samples_per_second": 13.513,
+      "eval_steps_per_second": 1.696,
       "step": 700
     },
     {
@@ -179,9 +179,9 @@
     {
       "epoch": 1.3300083125519535,
       "eval_loss": 0.2525966763496399,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5366,
+      "eval_samples_per_second": 13.521,
+      "eval_steps_per_second": 1.697,
       "step": 800
     },
     {
@@ -201,9 +201,9 @@
     {
       "epoch": 1.4962593516209477,
       "eval_loss": 0.24994711577892303,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5909,
+      "eval_samples_per_second": 13.501,
+      "eval_steps_per_second": 1.694,
       "step": 900
     },
     {
@@ -223,9 +223,9 @@
     {
       "epoch": 1.6625103906899419,
       "eval_loss": 0.246443971991539,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.514,
+      "eval_samples_per_second": 13.529,
+      "eval_steps_per_second": 1.698,
       "step": 1000
     },
     {
@@ -245,9 +245,9 @@
     {
       "epoch": 1.828761429758936,
       "eval_loss": 0.24409395456314087,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5322,
+      "eval_samples_per_second": 13.522,
+      "eval_steps_per_second": 1.697,
       "step": 1100
     },
     {
@@ -267,9 +267,9 @@
     {
       "epoch": 1.9950124688279303,
       "eval_loss": 0.2411041557788849,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.4594,
+      "eval_samples_per_second": 13.549,
+      "eval_steps_per_second": 1.701,
       "step": 1200
     },
     {
@@ -289,9 +289,9 @@
     {
       "epoch": 2.1612635078969245,
       "eval_loss": 0.23963774740695953,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.6761,
+      "eval_samples_per_second": 13.469,
+      "eval_steps_per_second": 1.69,
       "step": 1300
     },
     {
@@ -311,9 +311,9 @@
     {
       "epoch": 2.3275145469659186,
       "eval_loss": 0.23829442262649536,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5959,
+      "eval_samples_per_second": 13.499,
+      "eval_steps_per_second": 1.694,
       "step": 1400
     },
     {
@@ -333,9 +333,9 @@
     {
       "epoch": 2.493765586034913,
       "eval_loss": 0.2369847148656845,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5924,
+      "eval_samples_per_second": 13.5,
+      "eval_steps_per_second": 1.694,
       "step": 1500
     },
     {
@@ -355,9 +355,9 @@
     {
       "epoch": 2.660016625103907,
       "eval_loss": 0.23503336310386658,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5255,
+      "eval_samples_per_second": 13.525,
+      "eval_steps_per_second": 1.697,
       "step": 1600
     },
     {
@@ -377,9 +377,9 @@
     {
       "epoch": 2.826267664172901,
       "eval_loss": 0.23422521352767944,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5068,
+      "eval_samples_per_second": 13.532,
+      "eval_steps_per_second": 1.698,
       "step": 1700
     },
     {
@@ -399,9 +399,9 @@
     {
       "epoch": 2.9925187032418954,
       "eval_loss": 0.23330263793468475,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5181,
+      "eval_samples_per_second": 13.528,
+      "eval_steps_per_second": 1.698,
       "step": 1800
     },
     {
@@ -421,9 +421,9 @@
     {
       "epoch": 3.1587697423108896,
       "eval_loss": 0.23302872478961945,
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.6159,
+      "eval_samples_per_second": 13.491,
+      "eval_steps_per_second": 1.693,
       "step": 1900
     },
     {
@@ -443,10 +443,98 @@
     {
       "epoch": 3.3250207813798838,
       "eval_loss": 0.23246362805366516,
-      "eval_runtime":
-      "eval_samples_per_second": 13.
-      "eval_steps_per_second": 1.
+      "eval_runtime": 36.5215,
+      "eval_samples_per_second": 13.526,
+      "eval_steps_per_second": 1.698,
       "step": 2000
+    },
+    {
+      "epoch": 3.408146300914381,
+      "grad_norm": 19064.091796875,
+      "learning_rate": 4.4176372712146424e-06,
+      "loss": 0.2531,
+      "step": 2050
+    },
+    {
+      "epoch": 3.491271820448878,
+      "grad_norm": 24487.681640625,
+      "learning_rate": 3.793677204658902e-06,
+      "loss": 0.2763,
+      "step": 2100
+    },
+    {
+      "epoch": 3.491271820448878,
+      "eval_loss": 0.23180559277534485,
+      "eval_runtime": 36.4965,
+      "eval_samples_per_second": 13.536,
+      "eval_steps_per_second": 1.699,
+      "step": 2100
+    },
+    {
+      "epoch": 3.574397339983375,
+      "grad_norm": 33160.66015625,
+      "learning_rate": 3.1697171381031614e-06,
+      "loss": 0.2706,
+      "step": 2150
+    },
+    {
+      "epoch": 3.657522859517872,
+      "grad_norm": 20284.03515625,
+      "learning_rate": 2.545757071547421e-06,
+      "loss": 0.2521,
+      "step": 2200
+    },
+    {
+      "epoch": 3.657522859517872,
+      "eval_loss": 0.23114623129367828,
+      "eval_runtime": 36.5506,
+      "eval_samples_per_second": 13.516,
+      "eval_steps_per_second": 1.696,
+      "step": 2200
+    },
+    {
+      "epoch": 3.7406483790523692,
+      "grad_norm": 55974.03125,
+      "learning_rate": 1.9217970049916804e-06,
+      "loss": 0.2542,
+      "step": 2250
+    },
+    {
+      "epoch": 3.8237738985868663,
+      "grad_norm": 18724.478515625,
+      "learning_rate": 1.2978369384359402e-06,
+      "loss": 0.2684,
+      "step": 2300
+    },
+    {
+      "epoch": 3.8237738985868663,
+      "eval_loss": 0.23083852231502533,
+      "eval_runtime": 36.3806,
+      "eval_samples_per_second": 13.579,
+      "eval_steps_per_second": 1.704,
+      "step": 2300
+    },
+    {
+      "epoch": 3.9068994181213634,
+      "grad_norm": 26152.619140625,
+      "learning_rate": 6.738768718801997e-07,
+      "loss": 0.2582,
+      "step": 2350
+    },
+    {
+      "epoch": 3.9900249376558605,
+      "grad_norm": 20345.572265625,
+      "learning_rate": 4.9916805324459236e-08,
+      "loss": 0.2529,
+      "step": 2400
+    },
+    {
+      "epoch": 3.9900249376558605,
+      "eval_loss": 0.23079748451709747,
+      "eval_runtime": 36.4645,
+      "eval_samples_per_second": 13.547,
+      "eval_steps_per_second": 1.7,
+      "step": 2400
     }
   ],
   "logging_steps": 50,
@@ -461,12 +549,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 2.342112942882816e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null