{
  "best_metric": 0.5859972834587097,
  "best_model_checkpoint": "facial_emotions_image_detection/checkpoint-15740",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 15740,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 9.7131931166348e-06,
      "loss": 1.5691,
      "step": 500
    },
    {
      "epoch": 0.64,
      "learning_rate": 9.394518801784576e-06,
      "loss": 1.1566,
      "step": 1000
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.075844486934355e-06,
      "loss": 1.005,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6603098927294399,
      "eval_loss": 1.0014679431915283,
      "eval_runtime": 113.4351,
      "eval_samples_per_second": 110.944,
      "eval_steps_per_second": 13.876,
      "step": 1574
    },
    {
      "epoch": 1.27,
      "learning_rate": 8.757170172084131e-06,
      "loss": 0.9148,
      "step": 2000
    },
    {
      "epoch": 1.59,
      "learning_rate": 8.438495857233908e-06,
      "loss": 0.8522,
      "step": 2500
    },
    {
      "epoch": 1.91,
      "learning_rate": 8.119821542383684e-06,
      "loss": 0.8063,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7133889551052841,
      "eval_loss": 0.8330217003822327,
      "eval_runtime": 108.2894,
      "eval_samples_per_second": 116.216,
      "eval_steps_per_second": 14.535,
      "step": 3148
    },
    {
      "epoch": 2.22,
      "learning_rate": 7.80114722753346e-06,
      "loss": 0.7566,
      "step": 3500
    },
    {
      "epoch": 2.54,
      "learning_rate": 7.482472912683239e-06,
      "loss": 0.7097,
      "step": 4000
    },
    {
      "epoch": 2.86,
      "learning_rate": 7.163798597833015e-06,
      "loss": 0.7016,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7423917361938817,
      "eval_loss": 0.7428602576255798,
      "eval_runtime": 107.858,
      "eval_samples_per_second": 116.681,
      "eval_steps_per_second": 14.593,
      "step": 4722
    },
    {
      "epoch": 3.18,
      "learning_rate": 6.8451242829827925e-06,
      "loss": 0.6608,
      "step": 5000
    },
    {
      "epoch": 3.49,
      "learning_rate": 6.526449968132569e-06,
      "loss": 0.6235,
      "step": 5500
    },
    {
      "epoch": 3.81,
      "learning_rate": 6.207775653282346e-06,
      "loss": 0.6138,
      "step": 6000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7507350019864919,
      "eval_loss": 0.7060629725456238,
      "eval_runtime": 109.3785,
      "eval_samples_per_second": 115.059,
      "eval_steps_per_second": 14.39,
      "step": 6296
    },
    {
      "epoch": 4.13,
      "learning_rate": 5.889101338432123e-06,
      "loss": 0.5723,
      "step": 6500
    },
    {
      "epoch": 4.45,
      "learning_rate": 5.5704270235819e-06,
      "loss": 0.5429,
      "step": 7000
    },
    {
      "epoch": 4.76,
      "learning_rate": 5.251752708731677e-06,
      "loss": 0.5403,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7726658720699245,
      "eval_loss": 0.6644242405891418,
      "eval_runtime": 108.6283,
      "eval_samples_per_second": 115.854,
      "eval_steps_per_second": 14.49,
      "step": 7870
    },
    {
      "epoch": 5.08,
      "learning_rate": 4.933078393881454e-06,
      "loss": 0.5112,
      "step": 8000
    },
    {
      "epoch": 5.4,
      "learning_rate": 4.61440407903123e-06,
      "loss": 0.4647,
      "step": 8500
    },
    {
      "epoch": 5.72,
      "learning_rate": 4.295729764181008e-06,
      "loss": 0.4584,
      "step": 9000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7871275327771157,
      "eval_loss": 0.6358364820480347,
      "eval_runtime": 108.2696,
      "eval_samples_per_second": 116.238,
      "eval_steps_per_second": 14.538,
      "step": 9444
    },
    {
      "epoch": 6.04,
      "learning_rate": 3.977055449330784e-06,
      "loss": 0.4567,
      "step": 9500
    },
    {
      "epoch": 6.35,
      "learning_rate": 3.6583811344805616e-06,
      "loss": 0.4134,
      "step": 10000
    },
    {
      "epoch": 6.67,
      "learning_rate": 3.339706819630338e-06,
      "loss": 0.3957,
      "step": 10500
    },
    {
      "epoch": 6.99,
      "learning_rate": 3.021032504780115e-06,
      "loss": 0.3991,
      "step": 11000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.800238379022646,
      "eval_loss": 0.6062497496604919,
      "eval_runtime": 108.4345,
      "eval_samples_per_second": 116.061,
      "eval_steps_per_second": 14.516,
      "step": 11018
    },
    {
      "epoch": 7.31,
      "learning_rate": 2.702358189929892e-06,
      "loss": 0.3545,
      "step": 11500
    },
    {
      "epoch": 7.62,
      "learning_rate": 2.3836838750796687e-06,
      "loss": 0.3538,
      "step": 12000
    },
    {
      "epoch": 7.94,
      "learning_rate": 2.0650095602294456e-06,
      "loss": 0.3544,
      "step": 12500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8045292014302742,
      "eval_loss": 0.6008493900299072,
      "eval_runtime": 110.3834,
      "eval_samples_per_second": 114.012,
      "eval_steps_per_second": 14.259,
      "step": 12592
    },
    {
      "epoch": 8.26,
      "learning_rate": 1.7463352453792225e-06,
      "loss": 0.3183,
      "step": 13000
    },
    {
      "epoch": 8.58,
      "learning_rate": 1.4276609305289997e-06,
      "loss": 0.3218,
      "step": 13500
    },
    {
      "epoch": 8.89,
      "learning_rate": 1.1089866156787763e-06,
      "loss": 0.3184,
      "step": 14000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8116805721096544,
      "eval_loss": 0.590340256690979,
      "eval_runtime": 108.9419,
      "eval_samples_per_second": 115.52,
      "eval_steps_per_second": 14.448,
      "step": 14166
    },
    {
      "epoch": 9.21,
      "learning_rate": 7.903123008285534e-07,
      "loss": 0.3054,
      "step": 14500
    },
    {
      "epoch": 9.53,
      "learning_rate": 4.7163798597833015e-07,
      "loss": 0.2813,
      "step": 15000
    },
    {
      "epoch": 9.85,
      "learning_rate": 1.5296367112810708e-07,
      "loss": 0.2984,
      "step": 15500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8162097735399285,
      "eval_loss": 0.5859972834587097,
      "eval_runtime": 111.4622,
      "eval_samples_per_second": 112.908,
      "eval_steps_per_second": 14.121,
      "step": 15740
    }
  ],
  "logging_steps": 500,
  "max_steps": 15740,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 3.900966581033497e+19,
  "trial_name": null,
  "trial_params": null
}