Training in progress, step 50, checkpoint (commit a780018, verified)
{
"best_metric": 1.6797080039978027,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.09363295880149813,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0018726591760299626,
"grad_norm": 1.7517485618591309,
"learning_rate": 5e-05,
"loss": 4.2167,
"step": 1
},
{
"epoch": 0.0018726591760299626,
"eval_loss": 5.32598352432251,
"eval_runtime": 242.9608,
"eval_samples_per_second": 14.809,
"eval_steps_per_second": 1.852,
"step": 1
},
{
"epoch": 0.003745318352059925,
"grad_norm": 2.233262777328491,
"learning_rate": 0.0001,
"loss": 4.7102,
"step": 2
},
{
"epoch": 0.0056179775280898875,
"grad_norm": 2.4297382831573486,
"learning_rate": 9.989294616193017e-05,
"loss": 4.8826,
"step": 3
},
{
"epoch": 0.00749063670411985,
"grad_norm": 3.3503963947296143,
"learning_rate": 9.957224306869053e-05,
"loss": 4.4998,
"step": 4
},
{
"epoch": 0.009363295880149813,
"grad_norm": 3.326855421066284,
"learning_rate": 9.903926402016153e-05,
"loss": 3.9762,
"step": 5
},
{
"epoch": 0.011235955056179775,
"grad_norm": 5.497750759124756,
"learning_rate": 9.829629131445342e-05,
"loss": 3.4664,
"step": 6
},
{
"epoch": 0.013108614232209739,
"grad_norm": 3.591024398803711,
"learning_rate": 9.73465064747553e-05,
"loss": 2.8895,
"step": 7
},
{
"epoch": 0.0149812734082397,
"grad_norm": 3.615856647491455,
"learning_rate": 9.619397662556435e-05,
"loss": 2.6043,
"step": 8
},
{
"epoch": 0.016853932584269662,
"grad_norm": 2.7465076446533203,
"learning_rate": 9.484363707663442e-05,
"loss": 2.3626,
"step": 9
},
{
"epoch": 0.018726591760299626,
"grad_norm": 3.563035011291504,
"learning_rate": 9.330127018922194e-05,
"loss": 2.0889,
"step": 10
},
{
"epoch": 0.020599250936329586,
"grad_norm": 2.8092472553253174,
"learning_rate": 9.157348061512727e-05,
"loss": 1.9644,
"step": 11
},
{
"epoch": 0.02247191011235955,
"grad_norm": 2.3839683532714844,
"learning_rate": 8.966766701456177e-05,
"loss": 1.8961,
"step": 12
},
{
"epoch": 0.024344569288389514,
"grad_norm": 2.2432806491851807,
"learning_rate": 8.759199037394887e-05,
"loss": 2.0467,
"step": 13
},
{
"epoch": 0.026217228464419477,
"grad_norm": 1.7278889417648315,
"learning_rate": 8.535533905932738e-05,
"loss": 1.9476,
"step": 14
},
{
"epoch": 0.028089887640449437,
"grad_norm": 1.5298266410827637,
"learning_rate": 8.296729075500344e-05,
"loss": 1.9927,
"step": 15
},
{
"epoch": 0.0299625468164794,
"grad_norm": 1.4526454210281372,
"learning_rate": 8.043807145043604e-05,
"loss": 1.8496,
"step": 16
},
{
"epoch": 0.031835205992509365,
"grad_norm": 1.2890679836273193,
"learning_rate": 7.777851165098012e-05,
"loss": 1.7717,
"step": 17
},
{
"epoch": 0.033707865168539325,
"grad_norm": 1.2238835096359253,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8753,
"step": 18
},
{
"epoch": 0.035580524344569285,
"grad_norm": 1.2657208442687988,
"learning_rate": 7.211443451095007e-05,
"loss": 1.6777,
"step": 19
},
{
"epoch": 0.03745318352059925,
"grad_norm": 1.1719173192977905,
"learning_rate": 6.91341716182545e-05,
"loss": 1.6835,
"step": 20
},
{
"epoch": 0.03932584269662921,
"grad_norm": 1.0336369276046753,
"learning_rate": 6.607197326515808e-05,
"loss": 1.7054,
"step": 21
},
{
"epoch": 0.04119850187265917,
"grad_norm": 1.1664713621139526,
"learning_rate": 6.294095225512603e-05,
"loss": 1.6424,
"step": 22
},
{
"epoch": 0.04307116104868914,
"grad_norm": 1.1926559209823608,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.6149,
"step": 23
},
{
"epoch": 0.0449438202247191,
"grad_norm": 1.1861246824264526,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.6253,
"step": 24
},
{
"epoch": 0.04681647940074907,
"grad_norm": 1.3473325967788696,
"learning_rate": 5.327015646150716e-05,
"loss": 1.5603,
"step": 25
},
{
"epoch": 0.04681647940074907,
"eval_loss": 1.7174628973007202,
"eval_runtime": 242.9675,
"eval_samples_per_second": 14.809,
"eval_steps_per_second": 1.852,
"step": 25
},
{
"epoch": 0.04868913857677903,
"grad_norm": 2.2478160858154297,
"learning_rate": 5e-05,
"loss": 2.0625,
"step": 26
},
{
"epoch": 0.05056179775280899,
"grad_norm": 1.5112407207489014,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.8351,
"step": 27
},
{
"epoch": 0.052434456928838954,
"grad_norm": 1.1244219541549683,
"learning_rate": 4.347369038899744e-05,
"loss": 1.7192,
"step": 28
},
{
"epoch": 0.054307116104868915,
"grad_norm": 1.005778431892395,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.685,
"step": 29
},
{
"epoch": 0.056179775280898875,
"grad_norm": 0.9678806662559509,
"learning_rate": 3.705904774487396e-05,
"loss": 1.6409,
"step": 30
},
{
"epoch": 0.05805243445692884,
"grad_norm": 1.0039587020874023,
"learning_rate": 3.392802673484193e-05,
"loss": 1.6625,
"step": 31
},
{
"epoch": 0.0599250936329588,
"grad_norm": 1.1212393045425415,
"learning_rate": 3.086582838174551e-05,
"loss": 1.5962,
"step": 32
},
{
"epoch": 0.06179775280898876,
"grad_norm": 1.1912685632705688,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.5737,
"step": 33
},
{
"epoch": 0.06367041198501873,
"grad_norm": 1.0543924570083618,
"learning_rate": 2.500000000000001e-05,
"loss": 1.6632,
"step": 34
},
{
"epoch": 0.06554307116104868,
"grad_norm": 1.2754708528518677,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.5752,
"step": 35
},
{
"epoch": 0.06741573033707865,
"grad_norm": 1.1570887565612793,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.5309,
"step": 36
},
{
"epoch": 0.06928838951310862,
"grad_norm": 1.335957646369934,
"learning_rate": 1.703270924499656e-05,
"loss": 1.5368,
"step": 37
},
{
"epoch": 0.07116104868913857,
"grad_norm": 1.1365514993667603,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.8507,
"step": 38
},
{
"epoch": 0.07303370786516854,
"grad_norm": 1.1642248630523682,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.7966,
"step": 39
},
{
"epoch": 0.0749063670411985,
"grad_norm": 1.0239207744598389,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.6646,
"step": 40
},
{
"epoch": 0.07677902621722846,
"grad_norm": 1.0390326976776123,
"learning_rate": 8.426519384872733e-06,
"loss": 1.7976,
"step": 41
},
{
"epoch": 0.07865168539325842,
"grad_norm": 0.9292603731155396,
"learning_rate": 6.698729810778065e-06,
"loss": 1.6664,
"step": 42
},
{
"epoch": 0.08052434456928839,
"grad_norm": 0.9708360433578491,
"learning_rate": 5.156362923365588e-06,
"loss": 1.6991,
"step": 43
},
{
"epoch": 0.08239700374531835,
"grad_norm": 0.917884349822998,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.6031,
"step": 44
},
{
"epoch": 0.08426966292134831,
"grad_norm": 0.955154538154602,
"learning_rate": 2.653493525244721e-06,
"loss": 1.7643,
"step": 45
},
{
"epoch": 0.08614232209737828,
"grad_norm": 0.9974094033241272,
"learning_rate": 1.70370868554659e-06,
"loss": 1.597,
"step": 46
},
{
"epoch": 0.08801498127340825,
"grad_norm": 0.9787687063217163,
"learning_rate": 9.607359798384785e-07,
"loss": 1.618,
"step": 47
},
{
"epoch": 0.0898876404494382,
"grad_norm": 0.9520777463912964,
"learning_rate": 4.277569313094809e-07,
"loss": 1.6154,
"step": 48
},
{
"epoch": 0.09176029962546817,
"grad_norm": 1.1157002449035645,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.6452,
"step": 49
},
{
"epoch": 0.09363295880149813,
"grad_norm": 1.192071557044983,
"learning_rate": 0.0,
"loss": 1.5853,
"step": 50
},
{
"epoch": 0.09363295880149813,
"eval_loss": 1.6797080039978027,
"eval_runtime": 243.0062,
"eval_samples_per_second": 14.806,
"eval_steps_per_second": 1.852,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
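
The JSON above appears to be the trainer_state.json that the Hugging Face transformers Trainer saves inside each checkpoint directory. As a rough illustration (not part of the original checkpoint), the sketch below shows one way to load the file with the Python standard library and summarize the logged training and evaluation losses; the file path is an assumption that mirrors the "best_model_checkpoint" field above.

import json

# Load the checkpoint's trainer state. The path is assumed for illustration;
# the Trainer writes trainer_state.json inside each checkpoint-<step> directory.
with open("miner_id_24/checkpoint-50/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-step records (with "loss"/"grad_norm") and
# evaluation records (with "eval_loss"), so split them by key.
train_steps = [e for e in state["log_history"] if "loss" in e]
eval_steps = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

# Print each evaluation record (here: steps 1, 25, and 50).
for e in eval_steps:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

# Final logged training loss (step 50).
print(f"final train loss: {train_steps[-1]['loss']:.4f}")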