adapters-gemma-bnb8-QLORA-super_glue-cb/trainer_state-gemma-bnb8-QLORA-super_glue-cb-sequence_classification.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.161290322580645,
  "eval_steps": 1,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 447.3440246582031,
      "learning_rate": 2.5e-05,
      "loss": 24.8818,
      "step": 1
    },
    {
      "epoch": 0.5161290322580645,
      "eval_accuracy": 0.08064516129032258,
      "eval_f1": 0.04975124378109453,
      "eval_loss": 24.5625,
      "eval_runtime": 11.7818,
      "eval_samples_per_second": 5.262,
      "eval_steps_per_second": 0.679,
      "step": 1
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": 452.3684387207031,
      "learning_rate": 5e-05,
      "loss": 25.0098,
      "step": 2
    },
    {
      "epoch": 1.032258064516129,
      "eval_accuracy": 0.08064516129032258,
      "eval_f1": 0.04975124378109453,
      "eval_loss": 18.320560455322266,
      "eval_runtime": 11.8184,
      "eval_samples_per_second": 5.246,
      "eval_steps_per_second": 0.677,
      "step": 2
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": 418.07275390625,
      "learning_rate": 4.375e-05,
      "loss": 18.3652,
      "step": 3
    },
    {
      "epoch": 1.5483870967741935,
      "eval_accuracy": 0.1774193548387097,
      "eval_f1": 0.14204632612363857,
      "eval_loss": 3.016810655593872,
      "eval_runtime": 11.835,
      "eval_samples_per_second": 5.239,
      "eval_steps_per_second": 0.676,
      "step": 3
    },
    {
      "epoch": 2.064516129032258,
      "grad_norm": 213.42514038085938,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 2.6626,
      "step": 4
    },
    {
      "epoch": 2.064516129032258,
      "eval_accuracy": 0.7096774193548387,
      "eval_f1": 0.49412878787878783,
      "eval_loss": 1.579105257987976,
      "eval_runtime": 11.9232,
      "eval_samples_per_second": 5.2,
      "eval_steps_per_second": 0.671,
      "step": 4
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": 67.95157623291016,
      "learning_rate": 3.125e-05,
      "loss": 1.9023,
      "step": 5
    },
    {
      "epoch": 2.5806451612903225,
      "eval_accuracy": 0.532258064516129,
      "eval_f1": 0.29178885630498536,
      "eval_loss": 2.1996307373046875,
      "eval_runtime": 11.8295,
      "eval_samples_per_second": 5.241,
      "eval_steps_per_second": 0.676,
      "step": 5
    },
    {
      "epoch": 3.096774193548387,
      "grad_norm": 129.76478576660156,
      "learning_rate": 2.5e-05,
      "loss": 1.5272,
      "step": 6
    },
    {
      "epoch": 3.096774193548387,
      "eval_accuracy": 0.7258064516129032,
      "eval_f1": 0.49393939393939396,
      "eval_loss": 1.7026771306991577,
      "eval_runtime": 11.8735,
      "eval_samples_per_second": 5.222,
      "eval_steps_per_second": 0.674,
      "step": 6
    },
    {
      "epoch": 3.6129032258064515,
      "grad_norm": 98.3275375366211,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 1.9334,
      "step": 7
    },
    {
      "epoch": 3.6129032258064515,
      "eval_accuracy": 0.8709677419354839,
      "eval_f1": 0.6044973544973545,
      "eval_loss": 1.2077761888504028,
      "eval_runtime": 11.8388,
      "eval_samples_per_second": 5.237,
      "eval_steps_per_second": 0.676,
      "step": 7
    },
    {
      "epoch": 4.129032258064516,
      "grad_norm": 35.63172149658203,
      "learning_rate": 1.25e-05,
      "loss": 1.0846,
      "step": 8
    },
    {
      "epoch": 4.129032258064516,
      "eval_accuracy": 0.8870967741935484,
      "eval_f1": 0.6157904654230263,
      "eval_loss": 0.998982310295105,
      "eval_runtime": 11.878,
      "eval_samples_per_second": 5.22,
      "eval_steps_per_second": 0.674,
      "step": 8
    },
    {
      "epoch": 4.645161290322581,
      "grad_norm": 37.11382293701172,
      "learning_rate": 6.25e-06,
      "loss": 1.1097,
      "step": 9
    },
    {
      "epoch": 4.645161290322581,
      "eval_accuracy": 0.9032258064516129,
      "eval_f1": 0.6276836158192091,
      "eval_loss": 0.7891162633895874,
      "eval_runtime": 11.9185,
      "eval_samples_per_second": 5.202,
      "eval_steps_per_second": 0.671,
      "step": 9
    },
    {
      "epoch": 5.161290322580645,
      "grad_norm": 37.26462936401367,
      "learning_rate": 0.0,
      "loss": 0.7589,
      "step": 10
    },
    {
      "epoch": 5.161290322580645,
      "eval_accuracy": 0.8870967741935484,
      "eval_f1": 0.6160075329566855,
      "eval_loss": 0.6839671730995178,
      "eval_runtime": 11.9174,
      "eval_samples_per_second": 5.202,
      "eval_steps_per_second": 0.671,
      "step": 10
    },
    {
      "epoch": 5.161290322580645,
      "step": 10,
      "total_flos": 1.0010298170105856e+16,
      "train_loss": 7.923555982112885,
      "train_runtime": 832.0871,
      "train_samples_per_second": 2.932,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.0010298170105856e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
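The JSON above is the Hugging Face Trainer state saved alongside the adapter checkpoint: each optimizer step contributes one training record and one evaluation record to log_history. A minimal sketch (not part of the file itself; the file name is assumed from the header of this section) of pulling the per-step evaluation metrics out of log_history with the Python standard library:

    # Sketch: inspect the eval-metric trajectory recorded in log_history.
    # Assumes the trainer state is saved under the file name shown above.
    import json

    with open(
        "trainer_state-gemma-bnb8-QLORA-super_glue-cb-sequence_classification.json"
    ) as f:
        state = json.load(f)

    # Keep only the evaluation records (they carry "eval_accuracy").
    for record in state["log_history"]:
        if "eval_accuracy" in record:
            print(
                f"step {record['step']:>2} | "
                f"eval_loss {record['eval_loss']:.4f} | "
                f"eval_accuracy {record['eval_accuracy']:.4f} | "
                f"eval_f1 {record['eval_f1']:.4f}"
            )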