{
  "best_metric": 0.03739364445209503,
  "best_model_checkpoint": "doc-topic-model_eval-01_train-02/checkpoint-11000",
  "epoch": 7.889546351084813,
  "eval_steps": 1000,
  "global_step": 16000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3822348415851593,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1664,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.40402480959892273,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0944,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.089978888630867,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 11.9647,
      "eval_samples_per_second": 677.827,
      "eval_steps_per_second": 2.675,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.385586678981781,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0863,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3922920525074005,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0769,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9813966013294216,
      "eval_f1": 0.0015213753232922562,
      "eval_loss": 0.06878085434436798,
      "eval_precision": 0.9090909090909091,
      "eval_recall": 0.0007613247049866769,
      "eval_runtime": 14.1244,
      "eval_samples_per_second": 574.184,
      "eval_steps_per_second": 2.266,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.40515828132629395,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0677,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.3946343958377838,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0607,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.982203041512536,
      "eval_f1": 0.10543563439481371,
      "eval_loss": 0.05613817274570465,
      "eval_precision": 0.8203991130820399,
      "eval_recall": 0.056338028169014086,
      "eval_runtime": 14.2341,
      "eval_samples_per_second": 569.76,
      "eval_steps_per_second": 2.248,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.35836198925971985,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0564,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.385370135307312,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9844537040973964,
      "eval_f1": 0.36825433392846857,
      "eval_loss": 0.05005401372909546,
      "eval_precision": 0.7561494796594135,
      "eval_recall": 0.24339550818424058,
      "eval_runtime": 14.2688,
      "eval_samples_per_second": 568.374,
      "eval_steps_per_second": 2.243,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3222791850566864,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0476,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.3560543656349182,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0466,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9856697421942543,
      "eval_f1": 0.48531432934588953,
      "eval_loss": 0.04508376494050026,
      "eval_precision": 0.7322580645161291,
      "eval_recall": 0.3629234868671488,
      "eval_runtime": 14.2501,
      "eval_samples_per_second": 569.119,
      "eval_steps_per_second": 2.246,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.48908740282058716,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.3385593891143799,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0441,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9862465807786612,
      "eval_f1": 0.5088571717785201,
      "eval_loss": 0.04229620099067688,
      "eval_precision": 0.7590215914238261,
      "eval_recall": 0.3827179291968024,
      "eval_runtime": 14.2621,
      "eval_samples_per_second": 568.641,
      "eval_steps_per_second": 2.244,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.30710557103157043,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.039,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.38062548637390137,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0391,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9865994869396375,
      "eval_f1": 0.5537779036292416,
      "eval_loss": 0.0406394861638546,
      "eval_precision": 0.728457909113484,
      "eval_recall": 0.4466692044156833,
      "eval_runtime": 14.3173,
      "eval_samples_per_second": 566.446,
      "eval_steps_per_second": 2.235,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.5038283467292786,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0386,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.3748466968536377,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0372,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9869070396983999,
      "eval_f1": 0.5536766837375592,
      "eval_loss": 0.03953782096505165,
      "eval_precision": 0.757635858786196,
      "eval_recall": 0.43623905595736584,
      "eval_runtime": 14.3803,
      "eval_samples_per_second": 563.966,
      "eval_steps_per_second": 2.225,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4667779505252838,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0357,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.341769278049469,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0336,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9870884533072551,
      "eval_f1": 0.5703640822486323,
      "eval_loss": 0.03866717591881752,
      "eval_precision": 0.7494113273020201,
      "eval_recall": 0.46037304910544347,
      "eval_runtime": 14.1469,
      "eval_samples_per_second": 573.27,
      "eval_steps_per_second": 2.262,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.4473079442977905,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0336,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.4554208219051361,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9872131751633431,
      "eval_f1": 0.5864882207351728,
      "eval_loss": 0.03805249184370041,
      "eval_precision": 0.7368421052631579,
      "eval_recall": 0.48709554625047585,
      "eval_runtime": 14.2923,
      "eval_samples_per_second": 567.44,
      "eval_steps_per_second": 2.239,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.4344967007637024,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0306,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.38051480054855347,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0297,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.987422934648582,
      "eval_f1": 0.6050734312416556,
      "eval_loss": 0.03739364445209503,
      "eval_precision": 0.7282271023031601,
      "eval_recall": 0.5175485344499429,
      "eval_runtime": 14.3651,
      "eval_samples_per_second": 564.563,
      "eval_steps_per_second": 2.228,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.4212055504322052,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0301,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.5279490351676941,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0296,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9872174270448006,
      "eval_f1": 0.5795534007738566,
      "eval_loss": 0.03829270973801613,
      "eval_precision": 0.7474747474747475,
      "eval_recall": 0.4732394366197183,
      "eval_runtime": 14.2161,
      "eval_samples_per_second": 570.478,
      "eval_steps_per_second": 2.251,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.3991893231868744,
      "learning_rate": 1.8767455621301777e-05,
      "loss": 0.0273,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.18142229318618774,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0263,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9872797879728447,
      "eval_f1": 0.6096298551607151,
      "eval_loss": 0.03814740851521492,
      "eval_precision": 0.711038961038961,
      "eval_recall": 0.5335363532546631,
      "eval_runtime": 14.2926,
      "eval_samples_per_second": 567.426,
      "eval_steps_per_second": 2.239,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.3087170124053955,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0266,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.2865307033061981,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0272,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9874016752412943,
      "eval_f1": 0.6193310778981628,
      "eval_loss": 0.03800670802593231,
      "eval_precision": 0.7078112764291308,
      "eval_recall": 0.550513894175866,
      "eval_runtime": 14.1986,
      "eval_samples_per_second": 571.184,
      "eval_steps_per_second": 2.254,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.21536009013652802,
      "learning_rate": 1.8570315581854045e-05,
      "loss": 0.0253,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.20045433938503265,
      "learning_rate": 1.8521005917159765e-05,
      "loss": 0.0234,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9876213557832674,
      "eval_f1": 0.6177680525164114,
      "eval_loss": 0.03792005032300949,
      "eval_precision": 0.7265054040144107,
      "eval_recall": 0.5373429767795965,
      "eval_runtime": 14.2941,
      "eval_samples_per_second": 567.368,
      "eval_steps_per_second": 2.239,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.23305435478687286,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0226,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.5422170758247375,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0243,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9876667091854813,
      "eval_f1": 0.6212569637883009,
      "eval_loss": 0.03793339431285858,
      "eval_precision": 0.7252311756935271,
      "eval_recall": 0.5433574419489913,
      "eval_runtime": 14.2633,
      "eval_samples_per_second": 568.591,
      "eval_steps_per_second": 2.244,
      "step": 16000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 361390664998512.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}