{
"best_metric": 0.41002073884010315,
"best_model_checkpoint": "mikhail-panzo/zlm-fil-ceb_b64_le5_s8000/checkpoint-1000",
"epoch": 39.603960396039604,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.9801980198019802,
"grad_norm": 1.2683665752410889,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.4932,
"step": 50
},
{
"epoch": 3.9603960396039604,
"grad_norm": 1.1266463994979858,
"learning_rate": 5.000000000000001e-07,
"loss": 0.4898,
"step": 100
},
{
"epoch": 5.9405940594059405,
"grad_norm": 1.1800968647003174,
"learning_rate": 7.5e-07,
"loss": 0.4839,
"step": 150
},
{
"epoch": 7.920792079207921,
"grad_norm": 0.8235568404197693,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.4785,
"step": 200
},
{
"epoch": 9.900990099009901,
"grad_norm": 1.313211441040039,
"learning_rate": 1.25e-06,
"loss": 0.4767,
"step": 250
},
{
"epoch": 11.881188118811881,
"grad_norm": 0.7831560373306274,
"learning_rate": 1.5e-06,
"loss": 0.4681,
"step": 300
},
{
"epoch": 13.861386138613861,
"grad_norm": 0.7987237572669983,
"learning_rate": 1.75e-06,
"loss": 0.4658,
"step": 350
},
{
"epoch": 15.841584158415841,
"grad_norm": 0.7143813371658325,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.4627,
"step": 400
},
{
"epoch": 17.821782178217823,
"grad_norm": 0.8037531971931458,
"learning_rate": 2.25e-06,
"loss": 0.461,
"step": 450
},
{
"epoch": 19.801980198019802,
"grad_norm": 0.79031902551651,
"learning_rate": 2.5e-06,
"loss": 0.4592,
"step": 500
},
{
"epoch": 19.801980198019802,
"eval_loss": 0.42528408765792847,
"eval_runtime": 7.8982,
"eval_samples_per_second": 22.79,
"eval_steps_per_second": 2.912,
"step": 500
},
{
"epoch": 21.782178217821784,
"grad_norm": 0.7966288328170776,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.4551,
"step": 550
},
{
"epoch": 23.762376237623762,
"grad_norm": 0.8126183152198792,
"learning_rate": 3e-06,
"loss": 0.4542,
"step": 600
},
{
"epoch": 25.742574257425744,
"grad_norm": 0.6602805852890015,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.4535,
"step": 650
},
{
"epoch": 27.722772277227723,
"grad_norm": 0.7177000641822815,
"learning_rate": 3.5e-06,
"loss": 0.4484,
"step": 700
},
{
"epoch": 29.702970297029704,
"grad_norm": 0.7912996411323547,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.4486,
"step": 750
},
{
"epoch": 31.683168316831683,
"grad_norm": 0.6251747608184814,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4463,
"step": 800
},
{
"epoch": 33.663366336633665,
"grad_norm": 1.003780722618103,
"learning_rate": 4.25e-06,
"loss": 0.4474,
"step": 850
},
{
"epoch": 35.64356435643565,
"grad_norm": 0.742171585559845,
"learning_rate": 4.5e-06,
"loss": 0.4434,
"step": 900
},
{
"epoch": 37.62376237623762,
"grad_norm": 0.6034647822380066,
"learning_rate": 4.75e-06,
"loss": 0.4425,
"step": 950
},
{
"epoch": 39.603960396039604,
"grad_norm": 0.6990635395050049,
"learning_rate": 5e-06,
"loss": 0.4381,
"step": 1000
},
{
"epoch": 39.603960396039604,
"eval_loss": 0.41002073884010315,
"eval_runtime": 6.7782,
"eval_samples_per_second": 26.556,
"eval_steps_per_second": 3.393,
"step": 1000
}
],
"logging_steps": 50,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 320,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0803690034289064e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}