{
"best_metric": 0.5365062355995178,
"best_model_checkpoint": "data/Mistral-7B_task-2_120-samples_config-2/checkpoint-33",
"epoch": 12.909090909090908,
"eval_steps": 500,
"global_step": 71,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18181818181818182,
"grad_norm": 2.6919829845428467,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1214,
"step": 1
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.3824000358581543,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9355,
"step": 2
},
{
"epoch": 0.7272727272727273,
"grad_norm": 2.4275994300842285,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.027,
"step": 4
},
{
"epoch": 0.9090909090909091,
"eval_loss": 0.9180782437324524,
"eval_runtime": 30.0444,
"eval_samples_per_second": 0.799,
"eval_steps_per_second": 0.799,
"step": 5
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.098031759262085,
"learning_rate": 2.4e-05,
"loss": 0.8918,
"step": 6
},
{
"epoch": 1.4545454545454546,
"grad_norm": 1.4579511880874634,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.8457,
"step": 8
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.9394379258155823,
"learning_rate": 4e-05,
"loss": 0.7519,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 0.6666383147239685,
"eval_runtime": 30.0768,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 11
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.6056665182113647,
"learning_rate": 4.8e-05,
"loss": 0.6241,
"step": 12
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.5433595776557922,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.6457,
"step": 14
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.4983988404273987,
"learning_rate": 6.400000000000001e-05,
"loss": 0.5745,
"step": 16
},
{
"epoch": 2.909090909090909,
"eval_loss": 0.618429958820343,
"eval_runtime": 30.067,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 16
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.4503048062324524,
"learning_rate": 7.2e-05,
"loss": 0.6112,
"step": 18
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.4419262409210205,
"learning_rate": 8e-05,
"loss": 0.5459,
"step": 20
},
{
"epoch": 4.0,
"grad_norm": 0.3880172371864319,
"learning_rate": 8.800000000000001e-05,
"loss": 0.5503,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 0.5729122757911682,
"eval_runtime": 30.0813,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 22
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.38346508145332336,
"learning_rate": 9.6e-05,
"loss": 0.518,
"step": 24
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.3963974416255951,
"learning_rate": 9.999512620046522e-05,
"loss": 0.5106,
"step": 26
},
{
"epoch": 4.909090909090909,
"eval_loss": 0.5532026886940002,
"eval_runtime": 30.075,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 27
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.3698035776615143,
"learning_rate": 9.995614150494293e-05,
"loss": 0.4509,
"step": 28
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.40548670291900635,
"learning_rate": 9.987820251299122e-05,
"loss": 0.4604,
"step": 30
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.40967628359794617,
"learning_rate": 9.976136999909156e-05,
"loss": 0.4338,
"step": 32
},
{
"epoch": 6.0,
"eval_loss": 0.5365062355995178,
"eval_runtime": 30.0548,
"eval_samples_per_second": 0.799,
"eval_steps_per_second": 0.799,
"step": 33
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.3754049241542816,
"learning_rate": 9.96057350657239e-05,
"loss": 0.4068,
"step": 34
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.3780439794063568,
"learning_rate": 9.941141907232765e-05,
"loss": 0.3681,
"step": 36
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.4491212069988251,
"learning_rate": 9.917857354066931e-05,
"loss": 0.3544,
"step": 38
},
{
"epoch": 6.909090909090909,
"eval_loss": 0.5436187386512756,
"eval_runtime": 30.0652,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 38
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.4693133234977722,
"learning_rate": 9.890738003669029e-05,
"loss": 0.3178,
"step": 40
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.5338257551193237,
"learning_rate": 9.859805002892732e-05,
"loss": 0.2819,
"step": 42
},
{
"epoch": 8.0,
"grad_norm": 0.6368435621261597,
"learning_rate": 9.825082472361557e-05,
"loss": 0.2811,
"step": 44
},
{
"epoch": 8.0,
"eval_loss": 0.5942614674568176,
"eval_runtime": 30.081,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 44
},
{
"epoch": 8.363636363636363,
"grad_norm": 0.6040387749671936,
"learning_rate": 9.786597487660337e-05,
"loss": 0.2141,
"step": 46
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.8237395286560059,
"learning_rate": 9.744380058222483e-05,
"loss": 0.1854,
"step": 48
},
{
"epoch": 8.909090909090908,
"eval_loss": 0.6899933218955994,
"eval_runtime": 30.0753,
"eval_samples_per_second": 0.798,
"eval_steps_per_second": 0.798,
"step": 49
},
{
"epoch": 9.090909090909092,
"grad_norm": 1.074000358581543,
"learning_rate": 9.698463103929542e-05,
"loss": 0.1731,
"step": 50
},
{
"epoch": 9.454545454545455,
"grad_norm": 0.7525721192359924,
"learning_rate": 9.648882429441257e-05,
"loss": 0.1058,
"step": 52
},
{
"epoch": 9.818181818181818,
"grad_norm": 1.0988599061965942,
"learning_rate": 9.595676696276172e-05,
"loss": 0.1024,
"step": 54
},
{
"epoch": 10.0,
"eval_loss": 0.7491334080696106,
"eval_runtime": 30.4148,
"eval_samples_per_second": 0.789,
"eval_steps_per_second": 0.789,
"step": 55
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.7116291522979736,
"learning_rate": 9.538887392664544e-05,
"loss": 0.069,
"step": 56
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.9122013449668884,
"learning_rate": 9.478558801197065e-05,
"loss": 0.0493,
"step": 58
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.8271774649620056,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0434,
"step": 60
},
{
"epoch": 10.909090909090908,
"eval_loss": 0.8883001208305359,
"eval_runtime": 30.4277,
"eval_samples_per_second": 0.789,
"eval_steps_per_second": 0.789,
"step": 60
},
{
"epoch": 11.272727272727273,
"grad_norm": 0.7309849858283997,
"learning_rate": 9.347474647526095e-05,
"loss": 0.0262,
"step": 62
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.7137559056282043,
"learning_rate": 9.276821300802534e-05,
"loss": 0.0235,
"step": 64
},
{
"epoch": 12.0,
"grad_norm": 0.982509195804596,
"learning_rate": 9.202833017478422e-05,
"loss": 0.0225,
"step": 66
},
{
"epoch": 12.0,
"eval_loss": 0.9759795665740967,
"eval_runtime": 30.415,
"eval_samples_per_second": 0.789,
"eval_steps_per_second": 0.789,
"step": 66
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.678407609462738,
"learning_rate": 9.125567491391476e-05,
"loss": 0.0134,
"step": 68
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.6695177555084229,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0151,
"step": 70
},
{
"epoch": 12.909090909090908,
"eval_loss": 0.9967642426490784,
"eval_runtime": 30.3924,
"eval_samples_per_second": 0.79,
"eval_steps_per_second": 0.79,
"step": 71
},
{
"epoch": 12.909090909090908,
"step": 71,
"total_flos": 1.3485981876564787e+17,
"train_loss": 0.3812251945196743,
"train_runtime": 4931.148,
"train_samples_per_second": 0.892,
"train_steps_per_second": 0.051
}
],
"logging_steps": 2,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3485981876564787e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}