{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0026279942709724895,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.5039923612966525e-05,
      "grad_norm": null,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 3.5039923612966525e-05,
      "eval_loss": null,
      "eval_runtime": 568.7169,
      "eval_samples_per_second": 42.258,
      "eval_steps_per_second": 21.13,
      "step": 1
    },
    {
      "epoch": 7.007984722593305e-05,
      "grad_norm": null,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.00010511977083889957,
      "grad_norm": null,
      "learning_rate": 0.0002,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.0001401596944518661,
      "grad_norm": null,
      "learning_rate": 0.0001999048221581858,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.00017519961806483262,
      "grad_norm": null,
      "learning_rate": 0.00019961946980917456,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.00021023954167779914,
      "grad_norm": null,
      "learning_rate": 0.00019914448613738106,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.00024527946529076565,
      "grad_norm": null,
      "learning_rate": 0.00019848077530122083,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.0002803193889037322,
      "grad_norm": null,
      "learning_rate": 0.00019762960071199333,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.0003153593125166987,
      "grad_norm": null,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.00035039923612966524,
      "grad_norm": null,
      "learning_rate": 0.0001953716950748227,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.0003854391597426318,
      "grad_norm": null,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.00042047908335559827,
      "grad_norm": null,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.0004555190069685648,
      "grad_norm": null,
      "learning_rate": 0.000190630778703665,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.0004905589305815313,
      "grad_norm": null,
      "learning_rate": 0.00018870108331782217,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.0005255988541944979,
      "grad_norm": null,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.0005606387778074644,
      "grad_norm": null,
      "learning_rate": 0.0001843391445812886,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.0005956787014204309,
      "grad_norm": null,
      "learning_rate": 0.0001819152044288992,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.0006307186250333974,
      "grad_norm": null,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.0006657585486463639,
      "grad_norm": null,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.0007007984722593305,
      "grad_norm": null,
      "learning_rate": 0.0001737277336810124,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.000735838395872297,
      "grad_norm": null,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.0007708783194852636,
      "grad_norm": null,
      "learning_rate": 0.00016755902076156604,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.00080591824309823,
      "grad_norm": null,
      "learning_rate": 0.00016427876096865394,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.0008409581667111965,
      "grad_norm": null,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.0008759980903241631,
      "grad_norm": null,
      "learning_rate": 0.0001573576436351046,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.0008759980903241631,
      "eval_loss": null,
      "eval_runtime": 568.3835,
      "eval_samples_per_second": 42.283,
      "eval_steps_per_second": 21.142,
      "step": 25
    },
    {
      "epoch": 0.0009110380139371296,
      "grad_norm": null,
      "learning_rate": 0.0001537299608346824,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.0009460779375500962,
      "grad_norm": null,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.0009811178611630626,
      "grad_norm": null,
      "learning_rate": 0.00014617486132350343,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.0010161577847760293,
      "grad_norm": null,
      "learning_rate": 0.00014226182617406996,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.0010511977083889957,
      "grad_norm": null,
      "learning_rate": 0.000138268343236509,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.0010862376320019621,
      "grad_norm": null,
      "learning_rate": 0.00013420201433256689,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.0011212775556149288,
      "grad_norm": null,
      "learning_rate": 0.00013007057995042732,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.0011563174792278952,
      "grad_norm": null,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.0011913574028408619,
      "grad_norm": null,
      "learning_rate": 0.00012164396139381029,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.0012263973264538283,
      "grad_norm": null,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.0012614372500667948,
      "grad_norm": null,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.0012964771736797614,
      "grad_norm": null,
      "learning_rate": 0.00010871557427476583,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.0013315170972927279,
      "grad_norm": null,
      "learning_rate": 0.00010436193873653361,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.0013665570209056945,
      "grad_norm": null,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.001401596944518661,
      "grad_norm": null,
      "learning_rate": 9.563806126346642e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.0014366368681316274,
      "grad_norm": null,
      "learning_rate": 9.128442572523417e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.001471676791744594,
      "grad_norm": null,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.0015067167153575605,
      "grad_norm": null,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.0015417566389705271,
      "grad_norm": null,
      "learning_rate": 7.835603860618972e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.0015767965625834936,
      "grad_norm": null,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.00161183648619646,
      "grad_norm": null,
      "learning_rate": 6.992942004957271e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.0016468764098094267,
      "grad_norm": null,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.001681916333422393,
      "grad_norm": null,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.0017169562570353597,
      "grad_norm": null,
      "learning_rate": 5.773817382593008e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.0017519961806483262,
      "grad_norm": null,
      "learning_rate": 5.382513867649663e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.0017519961806483262,
      "eval_loss": null,
      "eval_runtime": 568.4685,
      "eval_samples_per_second": 42.277,
      "eval_steps_per_second": 21.139,
      "step": 50
    },
    {
      "epoch": 0.0017870361042612926,
      "grad_norm": null,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.0018220760278742593,
      "grad_norm": null,
      "learning_rate": 4.6270039165317605e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.0018571159514872257,
      "grad_norm": null,
      "learning_rate": 4.264235636489542e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.0018921558751001924,
      "grad_norm": null,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.0019271957987131588,
      "grad_norm": null,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.0019622357223261252,
      "grad_norm": null,
      "learning_rate": 3.244097923843398e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.001997275645939092,
      "grad_norm": null,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.0020323155695520585,
      "grad_norm": null,
      "learning_rate": 2.6272266318987603e-05,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 0.0020673554931650248,
      "grad_norm": null,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.0021023954167779914,
      "grad_norm": null,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.002137435340390958,
      "grad_norm": null,
      "learning_rate": 1.808479557110081e-05,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 0.0021724752640039243,
      "grad_norm": null,
      "learning_rate": 1.566085541871145e-05,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.002207515187616891,
      "grad_norm": null,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.0022425551112298576,
      "grad_norm": null,
      "learning_rate": 1.129891668217783e-05,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.0022775950348428243,
      "grad_norm": null,
      "learning_rate": 9.369221296335006e-06,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.0023126349584557905,
      "grad_norm": null,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.002347674882068757,
      "grad_norm": null,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.0023827148056817238,
      "grad_norm": null,
      "learning_rate": 4.628304925177318e-06,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.00241775472929469,
      "grad_norm": null,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.0024527946529076567,
      "grad_norm": null,
      "learning_rate": 2.3703992880066638e-06,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.0024878345765206233,
      "grad_norm": null,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.0025228745001335895,
      "grad_norm": null,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.002557914423746556,
      "grad_norm": null,
      "learning_rate": 3.805301908254455e-07,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.002592954347359523,
      "grad_norm": null,
      "learning_rate": 9.517784181422019e-08,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.0026279942709724895,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.0026279942709724895,
      "eval_loss": null,
      "eval_runtime": 568.4882,
      "eval_samples_per_second": 42.275,
      "eval_steps_per_second": 21.139,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 92484403200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}