{
"best_metric": 2.0190470218658447,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.11555683952043912,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0023111367904087824,
"grad_norm": 4.737668037414551,
"learning_rate": 5e-05,
"loss": 8.8266,
"step": 1
},
{
"epoch": 0.0023111367904087824,
"eval_loss": 10.756378173828125,
"eval_runtime": 88.4893,
"eval_samples_per_second": 32.942,
"eval_steps_per_second": 4.125,
"step": 1
},
{
"epoch": 0.004622273580817565,
"grad_norm": 4.769935131072998,
"learning_rate": 0.0001,
"loss": 8.5569,
"step": 2
},
{
"epoch": 0.006933410371226347,
"grad_norm": 5.380407810211182,
"learning_rate": 9.989294616193017e-05,
"loss": 8.9336,
"step": 3
},
{
"epoch": 0.00924454716163513,
"grad_norm": 12.746003150939941,
"learning_rate": 9.957224306869053e-05,
"loss": 8.4446,
"step": 4
},
{
"epoch": 0.011555683952043911,
"grad_norm": 8.30001449584961,
"learning_rate": 9.903926402016153e-05,
"loss": 7.601,
"step": 5
},
{
"epoch": 0.013866820742452693,
"grad_norm": 7.933149337768555,
"learning_rate": 9.829629131445342e-05,
"loss": 5.6546,
"step": 6
},
{
"epoch": 0.016177957532861477,
"grad_norm": 6.780417442321777,
"learning_rate": 9.73465064747553e-05,
"loss": 5.0568,
"step": 7
},
{
"epoch": 0.01848909432327026,
"grad_norm": 6.728231430053711,
"learning_rate": 9.619397662556435e-05,
"loss": 4.211,
"step": 8
},
{
"epoch": 0.02080023111367904,
"grad_norm": 7.113791465759277,
"learning_rate": 9.484363707663442e-05,
"loss": 3.9294,
"step": 9
},
{
"epoch": 0.023111367904087823,
"grad_norm": 4.8951311111450195,
"learning_rate": 9.330127018922194e-05,
"loss": 3.1353,
"step": 10
},
{
"epoch": 0.025422504694496605,
"grad_norm": 8.186279296875,
"learning_rate": 9.157348061512727e-05,
"loss": 3.0856,
"step": 11
},
{
"epoch": 0.027733641484905387,
"grad_norm": 8.730195045471191,
"learning_rate": 8.966766701456177e-05,
"loss": 3.2603,
"step": 12
},
{
"epoch": 0.03004477827531417,
"grad_norm": 10.195311546325684,
"learning_rate": 8.759199037394887e-05,
"loss": 2.8125,
"step": 13
},
{
"epoch": 0.032355915065722954,
"grad_norm": 5.314338207244873,
"learning_rate": 8.535533905932738e-05,
"loss": 2.3914,
"step": 14
},
{
"epoch": 0.03466705185613173,
"grad_norm": 3.3374502658843994,
"learning_rate": 8.296729075500344e-05,
"loss": 2.2778,
"step": 15
},
{
"epoch": 0.03697818864654052,
"grad_norm": 6.324762344360352,
"learning_rate": 8.043807145043604e-05,
"loss": 2.4252,
"step": 16
},
{
"epoch": 0.039289325436949296,
"grad_norm": 4.7701239585876465,
"learning_rate": 7.777851165098012e-05,
"loss": 2.4389,
"step": 17
},
{
"epoch": 0.04160046222735808,
"grad_norm": 2.40031361579895,
"learning_rate": 7.500000000000001e-05,
"loss": 2.0299,
"step": 18
},
{
"epoch": 0.04391159901776687,
"grad_norm": 4.168339729309082,
"learning_rate": 7.211443451095007e-05,
"loss": 2.0835,
"step": 19
},
{
"epoch": 0.046222735808175645,
"grad_norm": 2.804626941680908,
"learning_rate": 6.91341716182545e-05,
"loss": 1.9547,
"step": 20
},
{
"epoch": 0.04853387259858443,
"grad_norm": 2.20493745803833,
"learning_rate": 6.607197326515808e-05,
"loss": 1.9779,
"step": 21
},
{
"epoch": 0.05084500938899321,
"grad_norm": 1.6454471349716187,
"learning_rate": 6.294095225512603e-05,
"loss": 2.051,
"step": 22
},
{
"epoch": 0.053156146179401995,
"grad_norm": 1.7552924156188965,
"learning_rate": 5.9754516100806423e-05,
"loss": 2.1951,
"step": 23
},
{
"epoch": 0.05546728296981077,
"grad_norm": 1.9301644563674927,
"learning_rate": 5.6526309611002594e-05,
"loss": 2.3227,
"step": 24
},
{
"epoch": 0.05777841976021956,
"grad_norm": 2.2262816429138184,
"learning_rate": 5.327015646150716e-05,
"loss": 2.5943,
"step": 25
},
{
"epoch": 0.05777841976021956,
"eval_loss": 2.122105836868286,
"eval_runtime": 88.3962,
"eval_samples_per_second": 32.977,
"eval_steps_per_second": 4.129,
"step": 25
},
{
"epoch": 0.06008955655062834,
"grad_norm": 2.3176376819610596,
"learning_rate": 5e-05,
"loss": 1.7672,
"step": 26
},
{
"epoch": 0.06240069334103712,
"grad_norm": 1.6695570945739746,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.7331,
"step": 27
},
{
"epoch": 0.06471183013144591,
"grad_norm": 1.470428705215454,
"learning_rate": 4.347369038899744e-05,
"loss": 1.8564,
"step": 28
},
{
"epoch": 0.0670229669218547,
"grad_norm": 1.9413001537322998,
"learning_rate": 4.0245483899193595e-05,
"loss": 2.0338,
"step": 29
},
{
"epoch": 0.06933410371226346,
"grad_norm": 1.4334675073623657,
"learning_rate": 3.705904774487396e-05,
"loss": 1.9684,
"step": 30
},
{
"epoch": 0.07164524050267225,
"grad_norm": 1.4908390045166016,
"learning_rate": 3.392802673484193e-05,
"loss": 1.7507,
"step": 31
},
{
"epoch": 0.07395637729308104,
"grad_norm": 1.3455537557601929,
"learning_rate": 3.086582838174551e-05,
"loss": 1.818,
"step": 32
},
{
"epoch": 0.07626751408348982,
"grad_norm": 1.1437408924102783,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.7837,
"step": 33
},
{
"epoch": 0.07857865087389859,
"grad_norm": 1.5162749290466309,
"learning_rate": 2.500000000000001e-05,
"loss": 1.8623,
"step": 34
},
{
"epoch": 0.08088978766430738,
"grad_norm": 1.4613982439041138,
"learning_rate": 2.2221488349019903e-05,
"loss": 2.086,
"step": 35
},
{
"epoch": 0.08320092445471616,
"grad_norm": 0.9104688763618469,
"learning_rate": 1.9561928549563968e-05,
"loss": 2.151,
"step": 36
},
{
"epoch": 0.08551206124512495,
"grad_norm": 1.7871757745742798,
"learning_rate": 1.703270924499656e-05,
"loss": 2.235,
"step": 37
},
{
"epoch": 0.08782319803553373,
"grad_norm": 1.5214916467666626,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.9367,
"step": 38
},
{
"epoch": 0.0901343348259425,
"grad_norm": 2.2295875549316406,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.7298,
"step": 39
},
{
"epoch": 0.09244547161635129,
"grad_norm": 1.6196082830429077,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.7052,
"step": 40
},
{
"epoch": 0.09475660840676008,
"grad_norm": 1.135953664779663,
"learning_rate": 8.426519384872733e-06,
"loss": 1.8029,
"step": 41
},
{
"epoch": 0.09706774519716886,
"grad_norm": 0.9740651845932007,
"learning_rate": 6.698729810778065e-06,
"loss": 1.9533,
"step": 42
},
{
"epoch": 0.09937888198757763,
"grad_norm": 1.0038777589797974,
"learning_rate": 5.156362923365588e-06,
"loss": 1.7188,
"step": 43
},
{
"epoch": 0.10169001877798642,
"grad_norm": 1.0359888076782227,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.746,
"step": 44
},
{
"epoch": 0.1040011555683952,
"grad_norm": 1.179372787475586,
"learning_rate": 2.653493525244721e-06,
"loss": 1.8965,
"step": 45
},
{
"epoch": 0.10631229235880399,
"grad_norm": 1.2560983896255493,
"learning_rate": 1.70370868554659e-06,
"loss": 1.8588,
"step": 46
},
{
"epoch": 0.10862342914921277,
"grad_norm": 1.3571735620498657,
"learning_rate": 9.607359798384785e-07,
"loss": 1.9218,
"step": 47
},
{
"epoch": 0.11093456593962155,
"grad_norm": 1.9261860847473145,
"learning_rate": 4.277569313094809e-07,
"loss": 2.1027,
"step": 48
},
{
"epoch": 0.11324570273003033,
"grad_norm": 1.3769975900650024,
"learning_rate": 1.0705383806982606e-07,
"loss": 2.0781,
"step": 49
},
{
"epoch": 0.11555683952043912,
"grad_norm": 1.6940319538116455,
"learning_rate": 0.0,
"loss": 2.5252,
"step": 50
},
{
"epoch": 0.11555683952043912,
"eval_loss": 2.0190470218658447,
"eval_runtime": 88.4389,
"eval_samples_per_second": 32.961,
"eval_steps_per_second": 4.127,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}